hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
9afcb307c74d9f3a27088e4d70b60b79824319aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setup_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *state = NULL;
hipMalloc(&state, XSIZE*YSIZE);
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setup_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setup_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setup_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9afcb307c74d9f3a27088e4d70b60b79824319aa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setup_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *state = NULL;
cudaMalloc(&state, XSIZE*YSIZE);
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setup_kernel<<<gridBlock,threadBlock>>>(state,seed);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setup_kernel<<<gridBlock,threadBlock>>>(state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setup_kernel<<<gridBlock,threadBlock>>>(state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4d54eed79eb9145ec50b468edd010a642bd2ad6a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "assisted_activation2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha = 2;
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float *gt_gpu = NULL;
hipMalloc(>_gpu, XSIZE*YSIZE);
float *a_avg_gpu = NULL;
hipMalloc(&a_avg_gpu, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int channels = 1;
int batches = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
assisted_activation2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
assisted_activation2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
assisted_activation2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4d54eed79eb9145ec50b468edd010a642bd2ad6a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "assisted_activation2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha = 2;
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float *gt_gpu = NULL;
cudaMalloc(>_gpu, XSIZE*YSIZE);
float *a_avg_gpu = NULL;
cudaMalloc(&a_avg_gpu, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int channels = 1;
int batches = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
assisted_activation2_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
assisted_activation2_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
assisted_activation2_kernel<<<gridBlock,threadBlock>>>(alpha,output,gt_gpu,a_avg_gpu,size,channels,batches);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
aac67e57114c2982253881c61092800d4dd5a2ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* codeml.c (aaml.c & codonml.c)
Maximum likelihood parameter estimation for codon sequences (seqtype=1)
or amino-acid sequences (seqtype=2)
Copyright, Ziheng YANG, 1993-2003
cc -o codeml -fast codeml.c tools.o -lm
codeml <ControlFileName>
*/
/*
#define NSSITESBandits
#define DSDN_MC 1
#define DSDN_MC_SITES 1
*/
#include "paml.h"
#define NS 7000
#define NBRANCH (NS*2-2)
#define NNODE (NS*2-1)
#define MAXNSONS 100
#define NGENE 2000
#define LSPNAME 50
#define NCODE 64
#define NCATG 513
#define NBTYPE 17
#define NP (NBRANCH*2+NGENE-1+2+NCODE+2)
/*
#define NP (NBRANCH+NGENE-1+189+2+NCODE+2)
*/
extern char BASEs[],AAs[];
extern int noisy, NFunCall, NEigenQ, NPMatUVRoot, *ancestor, GeneticCode[][64];
extern double *SeqDistance;
int Forestry (FILE *fout);
int GetMemPUVR(int nc, int nUVR);
int sortwM3(double x[]);
void DetailOutput(FILE *fout, double x[], double var[]);
int GetOptions (char *ctlf);
int testx (double x[], int np);
int SetxBound (int np, double xb[][2]);
int SetxInitials (int np, double x[], double xb[][2]);
int GetInitials (double x[], int*fromfile);
double *PointKappa (double xcom[], int igene);
double *PointOmega (double xcom[], int igene, int inode, int isiteclass);
int GetCodonFreqs (void);
int SetParameters (double x[]);
int SetParametersNSsites (double x[]);
int Set_UVR_BranchSite (int iclass, int branchlabel);
int SetPGene (int igene, int _pi, int _UVRoot, int _alpha, double x[]);
int SetPSiteClass(int iclass, double x[]);
int PMatJC69like (double P[], double t, int n);
int printfcode (FILE *fout, double fb61[], double space[]);
int InitializeCodon (FILE *fout, double space[]);
int AA2Codonf (double faa[20], double fcodon[]);
int DistanceMatAA (FILE *fout);
int GetDaa(FILE *fout, double daa[]);
void getpcodonClass(double x[], double pcodonClass[]);
int SelectionCoefficients (FILE* fout, double kappa[], double ppi[], double omega);
int EigenQcodon(int mode, double blength, double *S, double *dS, double *dN,
double Root[], double U[], double V[], double *meanrate, double kappa[], double omega, double Q[]);
int EigenQaa(FILE *fout, double Root[], double U[], double V[],double rate[]);
int Qcodon2aa(double Qc[], double pic[], double Qaa[], double piaa[]);
int SetAA1STEP(void);
int GetOmegaAA(int OmegaAA[]);
int TestModelQc(FILE *fout, double x[]);
double lfun2dSdN(double x[], int np);
int VariancedSdN(double t, double omega, double vtw[2*2], double vdSdN[2*2]);
int GetCodonFreqs2 (void);
int PairwiseCodon(FILE *fout, FILE*fds, FILE*fdn, FILE*dt, double space[]);
int PairwiseAA(FILE *fout, FILE *f2AA);
int lfunNSsites_rate(FILE* fout, double x[], int np);
int lfunNSsites_M2M8(FILE* frst, double x[], int np);
int lfunNSsites_AC(FILE* frst, double x[], int np);
double GetBranchRate(int igene, int ibrate, double x[], int *ix);
int GetPMatBranch(double Pt[], double x[], double t, int inode);
int ConditionalPNode(int inode, int igene, double x[]);
double CDFdN_dS(double x,double par[]);
int DiscreteNSsites(double par[]);
char GetAASiteSpecies(int species, int sitepatt);
void finishup(void);
int mergeSeqs(FILE*fout);
void Get4foldSites(void);
int AdHocRateSmoothing(FILE*fout, double x[NS*3], double xb[NS*3][2], double space[]);
void DatingHeteroData(FILE* fout);
int SlidingWindow(FILE*fout, FILE* fpair[], double space[]);
void SimulateData2s61(void);
void Ina(void);
void d4dSdN(FILE*fout);
struct common_info {
char *z[NS], *spname[NS], seqf[128],outf[128],treef[128],daafile[128], cleandata;
char oldconP[NNODE]; /* update conP for nodes? to save computation */
int seqtype, ns, ls, ngene, posG[NGENE+1], lgene[NGENE], npatt,*pose, readpattern;
int runmode,clock, verbose,print, codonf,aaDist,model,NSsites;
int nOmega, nbtype, nOmegaType; /* branch partition, AA pair (w) partition */
int method, icode, ncode, Mgene, ndata, bootstrap;
int fix_rgene,fix_kappa,fix_omega,fix_alpha,fix_rho,nparK,fix_blength,getSE;
int np, ntime, nrgene, nkappa, npi, nrate, nalpha, ncatG, hkyREV;
size_t sconP, sspace;
double *fpatt, *space, kappa,omega,alpha,rho,rgene[NGENE], TipDate, TipDate_TimeUnit;
double pi[NCODE], piG[NGENE][64], fb61[64];
double f3x4[NGENE][12], *pf3x4, piAA[20];
double freqK[NCATG], rK[NCATG], MK[NCATG*NCATG],daa[20*20], *conP, *fhK;
double (*plfun)(double x[],int np);
double omega_fix; /* fix the last w in the NSbranchB, NSbranch2 models
for lineages. Useful for testing whether w>1 for some lineages. */
int conPSiteClass; /* conPSiteClass=0 if (method==0) and =1 if (method==1)?? */
int NnodeScale;
char *nodeScale; /* nScale[ns-1] for interior nodes */
double *nodeScaleF; /* nScaleF[npatt] for scale factors */
/* pomega & pkappa are used to communicate between SetParameters & ConditionalPNode
& EigenQcodon. Try to remove them? */
double *pomega, pkappa[5], *ppi;
} com;
struct TREEB {
int nbranch, nnode, root, branches[NBRANCH][2];
double lnL;
} tree;
struct TREEN {
int father, nson, sons[MAXNSONS], ibranch, ipop;
double branch, age, omega, *conP, label;
char *nodeStr, fossil, usefossil;
} *nodes, **gnodes, nodes_t[2*NS-1];
/* for sptree.nodes[].fossil: lower, upper, bounds, gamma, inverse-gamma */
enum {LOWER_F=1, UPPER_F, BOUND_F} FOSSIL_FLAGS;
char *fossils[]={" ", "L", "U", "B"};
struct SPECIESTREE {
int nbranch, nnode, root, nspecies, nfossil;
struct TREESPN {
char name[LSPNAME+1], fossil, usefossil; /* fossil: 0, 1, 2, 3 */
int father, nson, sons[2];
double age, pfossil[7]; /* lower and upper bounds or alpha & beta */
double *lnrates; /* log rates for loci */
} nodes[2*NS-1];
} sptree;
/* all trees are binary & rooted, with ancestors unknown. */
struct DATA { /* locus-specific data and tree information */
int ns[NGENE], ls[NGENE], npatt[NGENE], ngene, lgene[NGENE];
int root[NGENE+1], BlengthMethod, fix_nu, nbrate[NGENE], icode[NGENE];
char *z[NGENE][NS], cleandata[NGENE];
char idaafile[NGENE], daafile[NGENE][40];
double *fpatt[NGENE], lnpT, lnpR, lnpDi[NGENE];
double Qfactor[NGENE], pi[NGENE][NCODE];
double rgene[NGENE], kappa[NGENE], alpha[NGENE], omega[NGENE];
int NnodeScale[NGENE];
char *nodeScale[NGENE]; /* nScale[data.ns[locus]-1] for interior nodes */
} data;
extern double Small_Diff;
int Nsensecodon, FROM61[64], FROM64[64], FourFold[4][4];
int ChangedInIteration; /* 1: t changed, update P(t); 2: paras changed, update UVRoot */
double *PMat, *U, *V, *Root, *_UU[NBTYPE+2], *_VV[NBTYPE+2], *_Root[NBTYPE+2];
/* 5 sets for branchsite models (YN2002); 6 sets for clade models */
double pcodon0[64],paa0[20], *pcodonClass; /* for aaDist=FIT1 or FIT2 */
int BayesEB; /* =1 for site models M2a & M8; =2 for branch-site models A & C */
int LASTROUND;
int IClass=-1;
int OmegaAA[190], AA1STEP[190];
double _rateSite=1;
double Qfactor_NS, Qfactor_NS_branch[NBTYPE];
int KGaussLegendreRule=16;
double AAchem[][20+1]={ /* last element is the max */
{8.1, 10.5, 11.6, 13, 5.5, 10.5, 12.3, 9, 10.4, 5.2,
4.9, 11.3, 5.7, 5.2, 8, 9.2, 8.6, 5.4, 6.2, 5.9, 13}, /* p */
{ 31, 124, 56, 54, 55, 85, 83, 3, 96, 111,
111, 119, 105, 132, 32.5, 32, 61, 170, 136, 84, 170}, /* v */
{0, 0.65, 1.33, 1.38, 2.75, 0.89, 0.92, 0.74, 0.58,
0, 0, 0.33, 0, 0, 0.39, 1.42, 0.71, 0.13, 0.2, 0, -999},/* c */
{-0.11, 0.079, -0.136, -0.285, -0.184, -0.067, -0.246, -0.073, 0.32, 0.001,
-0.008, 0.049, -0.041, 0.438, -0.016, -0.153, -0.208, 0.493, 0.381, -0.155} /* a */
}; /* in the order p, v, c, a */
FILE *fout, *frub, *flnf, *frst, *frst1, *frst2=NULL, *finitials;
char *ratef="rates";
enum {Fequal, F1x4, F3x4, Fcodon, F1x4MG, F3x4MG, FMutSel0, FMutSel} CodonFreqs;
char *codonfreqs[]={"Fequal", "F1x4", "F3x4", "Fcodon", "F1x4MG", "F3x4MG", "FMutSel0", "FMutSel"};
enum {NSbranchB=1, NSbranch2, NSbranch3} NSBranchModels;
char *NSbranchmodels[]={"One dN/dS ratio",
"free dN/dS Ratios for branches", "several dN/dS ratios for branches",
"NSbranch3"};
enum {Poisson, EqualInput, Empirical, Empirical_F,
FromCodon=6, REVaa_0=8, REVaa=9} AAModel;
char *aamodels[]={"Poisson", "EqualInput", "Empirical", "Empirical_F", "",
"", "FromCodon", "", "REVaa_0", "REVaa"};
enum {NSnneutral=1, NSpselection, NSdiscrete, NSfreqs, NSgamma, NS2gamma,
NSbeta, NSbetaw, NSbetagamma, NSbeta1gamma, NSbeta1normal, NS02normal,
NS3normal, NSM2aRel=22, NSTgamma, NSTinvgamma, NSTgamma1, NSTinvgamma1} NSsitesModels;
char *NSsitesmodels[]={"one-ratio","NearlyNeutral", "PositiveSelection","discrete","freqs",
"gamma","2gamma","beta","beta&w>1","beta&gamma", "beta&gamma+1",
"beta&normal>1", "0&2normal>0", "3normal>0", "", "", "", "", "", "", "", "",
"M2a_rel", "Tgamma", "Tinvgamma", "Tgamma+1", "Tinvgamma+1"};
int maxNSsitesModels=27;
enum {FIT1=11, FIT2=12} SiteClassModels;
enum {AAClasses=7 } aaDistModels;
char *clockstr[]={"", "Global clock", "Local clock", "ClockCombined"};
enum {GlobalClock=1, LocalClock, ClockCombined} ClockModels;
#define CODEML 1
#include "treesub.c"
#include "treespace.c"
/* variables for batch run of site models */
int ncatG0=10, insmodel=0, nnsmodels=1, nsmodels[15]={0};
/* used for sliding windows analysis */
int windowsize0=20, offset0=1, npositive=0;
double lnLmodel;
int main (int argc, char *argv[])
{
FILE *fseq=NULL, *fpair[6];
char pairfs[6][32]={"2NG.dS","2NG.dN","2NG.t", "2ML.dS","2ML.dN","2ML.t"};
char ctlf[96]="codeml.ctl", *pmodel, timestr[64];
char *seqtypestr[3]={"CODONML", "AAML", "CODON2AAML"};
char *Mgenestr[]={"diff. rate", "separate data", "diff. rate & pi",
"diff. rate & k&w", "diff. rate & pi & k&w"};
int getdistance=1, i, k, s2=0, idata, nc, nUVR, cleandata0;
#ifdef NSSITESBandits
atexit(finishup);
#endif
starttimer();
/*
printf("KGaussLegendreRule? ");
scanf("%d", &KGaussLegendreRule);
*/
com.ndata=1;
noisy=0; com.runmode=0;
com.clock=0; com.fix_rgene=0; /* 0: estimate rate factors for genes */
com.cleandata=0; /* 1: delete; 0:use missing data */
com.seqtype=AAseq;
com.model=Empirical_F;
strcpy(com.daafile, "jones.dat");
com.icode=0; com.nrate=0;
com.fix_kappa=0; com.kappa=1; com.omega=2.1;
com.fix_alpha=1; com.alpha=0.; com.ncatG=4; /* alpha=0 := inf */
com.fix_rho=1; com.rho=0.;
com.getSE=0; com.print=0; com.verbose=1; com.fix_blength=0;
com.method=0; com.space=NULL;
frub=gfopen("rub","w");
frst=gfopen("rst","w");
frst1=gfopen("rst1","w");
/*
mergeSeqs(frst); exit(0);
Ina();
*/
SetSeed(-1, 0);
#if (DSDN_MC || DSDN_MC_SITES)
SimulateData2s61();
#endif
if(argc>1) strncpy(ctlf, argv[1], 95);
GetOptions(ctlf);
cleandata0 = com.cleandata;
if(com.runmode!=-2) finitials=fopen("in.codeml","r");
else getdistance = 1;
fprintf(frst, "Supplemental results for CODEML (seqf: %s treef: %s)\n",
com.seqf, com.treef);
if(com.getSE==2) frst2=fopen("rst2","w");
printf("%s in %s\n", seqtypestr[com.seqtype-1], pamlVerStr);
fout = gfopen(com.outf, "w");
if(noisy && com.seqtype==CODONseq)
{ printcu(F0,NULL,com.icode); puts("Nice code, uuh?"); }
/* space for P&U&V&Root */
if(com.clock==5 || com.clock==6)
DatingHeteroData(fout);
nUVR=1; nc=20;
if(com.seqtype==CODONseq) {
nc = 64;
if(com.model>=1) nUVR = NBTYPE+2;
}
else if (com.seqtype==CODONseq || com.model==FromCodon)
nc = 64;
GetMemPUVR(nc, nUVR);
if((fseq=fopen(com.seqf,"r"))==NULL || com.seqf[0]=='\0') {
printf ("\n\nSequence file %s not found!\n", com.seqf);
exit (-1);
}
/* d4dSdN(fout); */
if (com.aaDist==AAClasses) {
SetAA1STEP();
GetOmegaAA(OmegaAA);
}
else if (com.seqtype==AAseq && com.model==REVaa_0)
SetAA1STEP();
if(com.seqtype==1) {
for(i=0; i<3; i++)
fpair[i]=(FILE*)gfopen(pairfs[i],"w");
if(com.runmode==-2)
for(; i<6;i++) fpair[i]=(FILE*)gfopen(pairfs[i],"w");
}
else if(com.runmode==-2)
fpair[0]=(FILE*)gfopen("2AA.t","w");
for (idata=0; idata<com.ndata; idata++) {
if (com.ndata>1) {
printf ("\nData set %d ", idata+1);
fprintf(fout, "\n\nData set %d\n", idata+1);
fprintf(frst,"\t%d",idata+1);
fprintf(frst1, "%d", idata+1);
fprintf(frub,"\nData set %2d\n",idata+1);
}
if(idata)
GetOptions(ctlf); /* warning: ndata, cleandata etc. are read again. */
if(nnsmodels>1) {
if(com.seqtype!=1) error2("batch run of site models requires codon seqs.");
if(com.fix_omega) error2("fix omega during batch run?");
if(com.model) error2("model should be 0 in the batch run?");
if(com.runmode) error2("runmode?");
/* for allocating memory com.fhK[] */
com.NSsites=NSbetaw; com.ncatG=ncatG0+1;
for(i=0; i<nnsmodels; i++)
if(nsmodels[i]>=NSTgamma || nsmodels[i]<=NSTinvgamma1)
com.ncatG = max2(com.ncatG, KGaussLegendreRule+1);
printf("NSsites batch run (ncatG as in YNGP2000): ");
for(i=0; i<nnsmodels; i++)
printf(" %2d", nsmodels[i]);
FPN(F0);
}
com.cleandata = cleandata0;
/* ReadSeq may change seqtype*/
ReadSeq((com.verbose?fout:NULL), fseq, com.cleandata);
SetMapAmbiguity();
/* AllPatterns(fout); */
fprintf(frst1,"\t%d\t%d\t%d", com.ns, com.ls, com.npatt);
if (com.ngene==1)
com.Mgene = 0;
if(com.ngene>1) {
if(com.seqtype==1 && com.npi)
error2("codon models (estFreq) not implemented for ngene > 1");
if(com.runmode==-2 && com.Mgene!=1) error2("use Mgene=1 for runmode=-2?");
if(com.model) error2("NSbranchsites with ngene.");
if(com.NSsites) error2("NSsites with ngene.");
if(com.aaDist>=FIT1) /* because of pcodon0[] */
{ error2("ngene for amino acid fitness models"); }
}
if(com.ndata==1) fclose(fseq);
i = (com.ns*2-1)*sizeof(struct TREEN);
if((nodes=(struct TREEN*)malloc(i))==NULL)
error2("oom nodes");
pmodel=(com.seqtype==CODONseq?NSbranchmodels[com.model]:aamodels[com.model]);
fprintf(fout,"%s (in %s) %s\n",seqtypestr[com.seqtype-1], pamlVerStr, com.seqf);
fprintf(fout,"Model: %s for branches, ", pmodel);
if(com.clock) fprintf(fout," %s ",clockstr[com.clock]);
if(com.seqtype==CODONseq||com.model==FromCodon) {
if(com.fix_kappa) fprintf(fout, " kappa = %.3f fixed\n", com.kappa);
if(com.fix_omega) fprintf(fout, " omega = %.3f fixed\n", com.omega);
}
if(com.seqtype==AAseq && (com.model==Empirical||com.model==Empirical_F))
fprintf (fout, " (%s) ", com.daafile);
if(com.seqtype==AAseq&&com.nrate) fprintf(fout,"(nrate:%d) ", com.nrate);
if(com.alpha && com.rho) fprintf (fout, "Auto-");
if(com.alpha) fprintf (fout, "dGamma (ncatG=%d) ", com.ncatG);
if(com.ngene>1)
fprintf (fout, " (%d genes: %s) ", com.ngene, Mgenestr[com.Mgene]);
if(com.alpha==0) com.nalpha=0;
else com.nalpha=(com.nalpha?com.ngene:!com.fix_alpha);
if(com.Mgene==1) com.nalpha=!com.fix_alpha;
if(com.nalpha>1 && (!com.alpha || com.ngene==1 || com.fix_alpha))
error2("Malpha");
if(com.nalpha>1 && com.rho) error2("Malpha or rho");
if(com.nalpha>1) fprintf (fout,"(%d gamma)", com.nalpha);
if(com.Mgene && com.ngene==1) error2("Mgene for one gene.");
if(com.seqtype==CODONseq) {
fprintf (fout, "\nCodon frequency model: %s\n", codonfreqs[com.codonf]);
if(com.alpha)
fputs("Warning: Gamma model for codons. See documentation.",fout);
}
if((com.seqtype==CODONseq||com.model==FromCodon)
&& (com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses))
fprintf(fout,"%s, %s\n",com.daafile,(com.aaDist>0?"geometric":"linear"));
if(com.NSsites) {
fprintf(fout,"Site-class models: ");
if (nnsmodels==1) {
fprintf(fout," %s",NSsitesmodels[com.NSsites]);
if(com.NSsites>=NSdiscrete)fprintf(fout," (%d categories)",com.ncatG);
}
if(com.nparK) fprintf(fout," & HMM");
FPN(fout);
if(com.aaDist)
fprintf(fout,"\nFitness models: aaDist: %d\n",com.aaDist);
}
fprintf(fout,"ns = %3d ls = %3d\n\n", com.ns, com.ls);
com.sspace = max2(5000000,3*com.ncode*com.ncode*sizeof(double));
if(com.NSsites) {
if(com.sspace < 2*com.ncode*com.ncode+4*com.npatt*sizeof(double))
com.sspace = 2*com.ncode*com.ncode+4*com.npatt*sizeof(double);
}
k = com.ns*(com.ns-1)/2;
/*
com.sspace=max2(com.sspace,
(int)sizeof(double)*((com.ns*2-2)*(com.ns*2-2+4+k)+k));
*/
if((com.space = (double*)realloc(com.space,com.sspace))==NULL) {
printf("\nfailed to get %9lu bytes for space", com.sspace);
error2("oom space");
}
if(getdistance) {
SeqDistance=(double*)realloc(SeqDistance, k*sizeof(double));
ancestor=(int*)realloc(ancestor, k*sizeof(int));
if(SeqDistance==NULL||ancestor==NULL) error2("oom distance&ancestor");
for(i=0; i<k; i++) SeqDistance[i] = -1;
}
if(com.seqtype==AAseq) {
InitializeBaseAA (fout);
if (com.model==FromCodon /* ||com.aaDist==AAClasses */)
AA2Codonf(com.pi, com.fb61); /* get codon freqs from aa freqs */
}
else { /* codon sequences */
if(com.sspace < max2(com.ngene+1,com.ns)*(64+12+4)*sizeof(double)) {
com.sspace = max2(com.ngene+1,com.ns)*(64+12+4)*sizeof(double);
if((com.space = (double*)realloc(com.space,com.sspace))==NULL)
error2("oom space for #c");
}
if (InitializeCodon(fout,com.space))
error2("giving up on stop codons");
if(com.Mgene==3)
for(i=0; i<com.ngene; i++)
xtoy(com.pi,com.piG[i],com.ncode);
}
if(getdistance) {
if(com.seqtype==CODONseq)
DistanceMatNG86(fout,fpair[0],fpair[1],fpair[2],0);
else
DistanceMatAA(fout);
}
fflush(fout);
if(com.seqtype==AAseq && com.model==Poisson && !com.print)
PatternWeightJC69like(fout);
if(com.alpha || com.NSsites) {
s2=com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,s2))==NULL) error2("oom fhK");
}
if(com.runmode==-2 && com.Mgene!=1) {
if(com.seqtype==CODONseq)
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
else
PairwiseAA(fout, fpair[0]);
}
else {
com.sconP = 2L *com.ncode*com.npatt*sizeof(double);
/* to be increased later in GetInitials() */
/* com.sconP = (com.ns-1)*com.ncode*com.npatt*sizeof(double); */
com.conP = (double*)realloc(com.conP, com.sconP);
printf("\n%9u bytes for distance",com.ns*(com.ns-1)/2*sizeof(double));
printf("\n%9u bytes for conP\n", com.sconP);
printf ("%9u bytes for fhK\n%9u bytes for space\n", s2, com.sspace);
if(com.conP==NULL)
error2("oom conP");
if (nnsmodels>1) {
for(insmodel=0; insmodel<nnsmodels; insmodel++) {
com.NSsites = nsmodels[insmodel];
if(com.NSsites<=NSpselection)
com.ncatG = com.NSsites+1;
else if(com.NSsites==NSM2aRel || com.NSsites==NSdiscrete)
com.ncatG = 3;
else if (com.NSsites==NSfreqs)
com.ncatG=5;
else if (com.NSsites==NSbetaw||com.NSsites==NS02normal)
com.ncatG = ncatG0 + 1;
else
com.ncatG = ncatG0;
if(com.NSsites==NSTgamma || com.NSsites==NSTinvgamma)
com.ncatG=KGaussLegendreRule;
if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1)
com.ncatG=KGaussLegendreRule+1;
com.nrate = com.nkappa=(com.hkyREV?5:!com.fix_kappa);
if(com.NSsites==0 || com.NSsites==NSbetaw) com.nrate += !com.fix_omega;
else if(com.NSsites==NSnneutral) com.nrate ++;
else if(com.NSsites==NSpselection || com.NSsites==NSM2aRel)
com.nrate += 1+!com.fix_omega;
else if(com.NSsites==NSdiscrete)
com.nrate += com.ncatG;
printf("\n\nModel %d: %s\n",com.NSsites, NSsitesmodels[com.NSsites]);
fprintf(fout,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
fprintf(frst,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
fprintf(frub,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
if(com.NSsites) fprintf(fout," (%d categories)",com.ncatG);
FPN(fout);
#ifdef NSSITESBandits
com.fix_blength = (com.NSsites>0 ? 2 : 1);
if(com.NSsites>0) strcpy(com.treef,"M0tree");
#endif
Forestry(fout);
printf("\nTime used: %s\n", printtime(timestr));
fprintf(fout,"\nTime used: %s\n", printtime(timestr));
}
}
else {
if (com.Mgene==1) MultipleGenes(fout, fpair, com.space);
else if (com.runmode==0) Forestry(fout);
else if (com.runmode==3) StepwiseAddition(fout, com.space);
else if (com.runmode>=4) Perturbation(fout,(com.runmode==4),com.space);
else StarDecomposition(fout, com.space);
printf("\nTime used: %s\n", printtime(timestr));
fprintf(fout,"\nTime used: %s\n", printtime(timestr));
}
}
FPN(frst); fflush(frst);
FPN(frst1); fflush(frst1);
free(nodes);
} /* for (idata) */
/**************/
/*
printf("\nfalse positive: %6d\n", npositive);
fprintf(frst1, " false positive: %6d\n", npositive);
*/
fclose(frst);
k=0;
if(com.seqtype==1) k=(com.runmode==-2?6:3);
else if (com.runmode==-2) k=1;
FOR(i,k) fclose(fpair[i]);
if(com.ndata>1 && fseq) fclose(fseq);
fclose(fout); fclose(frub);
if(finitials) fclose(finitials);
FreeMemPUVR();
free(com.pose);
for(i=0; i<com.ns; i++) free(com.z[i]);
return (0);
}
/* x[]: t[ntime]; rgene[ngene-1]; kappa; p[](NSsites); omega[];
{ alpha(for NSsites) !! alpha, rho || rK[], fK[] || rK[], MK[] }
*/
int Forestry (FILE *fout)
{
static int times=0;
FILE *ftree, *frate=NULL;
int status=0, i,j=0,k, itree, ntree, np, iteration=1;
int pauptree=0, haslength;
double x[NP],xb[NP][2], xcom[NP-NBRANCH], lnL=0,lnL0=0, e=1e-8, tl=0, nchange=-1;
double *g=NULL, *H=NULL;
#ifdef NSSITESBandits
FILE *fM0tree;
#endif
if ((ftree=fopen(com.treef,"r"))==NULL) {
printf("\ntree file %s not found.\n", com.treef);
exit(-1);
}
GetTreeFileType(ftree, &ntree, &pauptree, 0);
if (com.alpha)
frate=(FILE*)gfopen(ratef,"w");
if (ntree>10 && com.npatt>10000 && com.print)
puts("\nlnf file may be large");
flnf=gfopen("lnf","w+");
fprintf(flnf,"%6d %6d %6d\n", ntree, com.ls, com.npatt);
if(com.seqtype==1 && com.aaDist>=FIT1) {
xtoy(com.pi,pcodon0,64);
zero(paa0,20);
FOR(i,com.ncode) paa0[GeneticCode[com.icode][FROM61[i]]]+=pcodon0[i];
pcodonClass=(double*)malloc(com.ncatG*64*sizeof(double));
if(pcodonClass==NULL) error2("oom pcodonClass");
}
for(itree=0; ntree==-1||itree<ntree; itree++,iteration=1) {
if(ReadTreeN(ftree,&haslength, &i,0,1))
{ puts("end of tree file."); break; }
printf("\nTREE # %2d\n", itree+1);
fprintf(fout,"\n\nTREE # %2d: ", itree+1);
fprintf(flnf,"\n\n%2d\n", itree+1);
if(com.print) fprintf (frst,"\n\nTREE # %2d\n", itree+1);
fprintf(frub,"\n\nTREE #%2d\n", itree+1);
if (com.fix_blength==2 && !haslength) error2("no branch lengths in tree");
if (com.fix_blength>0 && !haslength) com.fix_blength=0;
if (times++==0 && com.fix_blength>0 && haslength) {
if(com.clock) puts("\nBranch lengths in tree are ignored");
else {
if(com.fix_blength==2)
puts("\nBranch lengths in tree are fixed.");
else if(com.fix_blength==1)
puts("\nBranch lengths in tree used as initials.");
if(com.fix_blength==1) {
FOR(i,tree.nnode)
if((x[nodes[i].ibranch]=nodes[i].branch)<0)
x[nodes[i].ibranch]=1e-5;
}
}
}
LASTROUND=0;
if(com.cleandata)
nchange = MPScore(com.space);
if(com.ns<40) { OutTreeN(F0,0,0); printf(" MP score: %.0f",nchange); }
OutTreeN(fout,0,0); fprintf(fout," MP score: %.0f",nchange);
if(!com.clock && nodes[tree.root].nson<=2 && com.ns>2) {
puts("\nThis is a rooted tree, without clock. Check.");
fputs("\nThis is a rooted tree. Please check!",fout);
}
GetInitials(x, &i);
np = com.np;
if(noisy>=3 && np<100) matout(F0,x,1,np);
if(i==-1) iteration = 0;
if(np>NP || np-com.ntime>NP-NBRANCH) error2("raise NP");
if(com.sspace < spaceming2(np)) {
com.sspace = spaceming2(np);
printf ("\nspace adjusted to %9u bytes\n",com.sspace);
if((com.space=(double*)realloc(com.space,com.sspace))==NULL) {
printf("\ntrying to get %d bytes for ming2", com.sspace);
error2("oom space");
}
}
printf("\nntime & nrate & np:%6d%6d%6d\n",com.ntime,com.nrate,com.np);
/*
if(itree && !finitials) for(i=0;i<np-com.ntime;i++) x[com.ntime+i] = xcom[i];
*/
if(iteration && np) {
SetxBound(np, xb);
SetxInitials (np, x, xb); /* start within the feasible region */
}
PointconPnodes ();
lnL = com.plfun (x,np);
if(noisy) {
printf("\nnp =%6d", np);
printf("\nlnL0 = %12.6f\n",-lnL);
}
if(iteration && np) {
if(com.method == 1)
j = minB (noisy>2?frub:NULL, &lnL,x,xb, e, com.space);
else if (com.method==3)
j = minB2(noisy>2?frub:NULL, &lnL,x,xb, e, com.space);
else
j = ming2(noisy>2?frub:NULL,&lnL,com.plfun,NULL,x,xb, com.space,e,np);
if (j==-1 || lnL<=0 || lnL>1e7) status=-1; else status=0;
if(status) fprintf(fout,"\ncheck convergence..");
}
printf("Out..\nlnL = %12.6f\n",-lnL);
printf("%d lfun, %d EigenQcodon, %d P(t)\n",NFunCall, NEigenQ, NPMatUVRoot);
if (itree==0)
{ lnL0=lnL; FOR(i,np-com.ntime) xcom[i]=x[com.ntime+i]; }
else if (!j)
for (i=0; i<np-com.ntime; i++) xcom[i]=xcom[i]*.2+x[com.ntime+i]*0.8;
if(!LASTROUND && (com.NSsites==NSpselection||com.NSsites==NSM2aRel||com.NSsites==NSdiscrete
||com.NSsites==NSfreqs||com.NSsites==NS3normal)) {
/* transform back to p0, p1,... */
k=com.ntime+com.nrgene+com.nkappa+com.npi;
if(com.nparK) { /* HMM model for w */
k += com.ncatG;
for(i=0; i<com.ncatG; i++,k+=com.ncatG-1)
f_and_x(x+k,x+k,com.ncatG,0,0);
}
else {
j = (com.NSsites==NS3normal ? 3 : com.ncatG);
if(com.model && com.model<=NSbranch2) j=3;
f_and_x(x+k,x+k,j,0,0);
}
}
LASTROUND=1;
if(com.NSsites==NSdiscrete && com.aaDist==0 && com.model==0)
sortwM3(x);
if(com.clock) { /* move times into x[] */
for(i=0,j=!nodes[tree.root].fossil; i<tree.nnode; i++)
if(i!=tree.root && nodes[i].nson && !nodes[i].fossil)
x[j++] = nodes[i].age;
}
fprintf (fout,"\nlnL(ntime:%3d np:%3d): %13.6f %+14.6f\n",
com.ntime, np, -lnL, -lnL+lnL0);
if(com.fix_blength<2) {
OutTreeB(fout); FPN(fout);
}
/*
OutTreeB(fout); FPN(fout);
if(com.fix_blength==2) {
for(i=0; i<tree.nbranch; i++) fprintf(fout, " %8.5f", nodes[tree.branches[i][1]].branch);
FPN(fout);
}
*/
for(i=0; i<np; i++) fprintf(fout," %8.6f",x[i]);
FPN(fout); fflush(fout);
if (com.getSE) {
puts("Calculating SE's");
if(com.sspace < np*(np+1)*sizeof(double)) {
com.sspace = np*(np+1)*sizeof(double);
if((com.space=(double*)realloc(com.space,com.sspace))==NULL)
error2("oom space for SE");
}
g = com.space;
H = g + com.np;
HessianSKT2004 (x, lnL, g, H);
if(com.getSE>=2 && com.clock==0 && nodes[tree.root].nson==3) { /* g & H */
fprintf(frst2,"\n %d\n\n", com.ns);
OutTreeN(frst2, 1, 1); fprintf(frst2,"\n\n");
for(i=0; i<com.ntime; i++)
if(x[i]>0.0004 && fabs(g[i])<0.005) g[i] = 0;
for(i=0; i<com.ntime; i++) fprintf(frst2," %9.6f", x[i]); fprintf(frst2, "\n\n");
for(i=0; i<com.ntime; i++) fprintf(frst2," %9.6f", g[i]); fprintf(frst2, "\n\n");
fprintf(frst2, "\nHessian\n\n");
for(i=0; i<com.ntime; i++,FPN(frst2))
for(j=0; j<com.ntime; j++)
fprintf(frst2," %10.4g", H[i*np+j]);
fflush(frst2);
}
for(i=0; i<np*np; i++) H[i] *= -1;
matinv(H, np, np, H+np*np);
fprintf(fout,"SEs for parameters:\n");
for(i=0; i<np; i++)
fprintf(fout," %8.6f", (H[i*np+i]>0. ? sqrt(H[i*np+i]) : -1));
FPN(fout);
}
if(com.seqtype==1 && com.ntime && com.clock==0)
fprintf(fout,"\nNote: Branch length is defined as number of nucleotide substitutions per codon (not per neucleotide site).\n");
if(com.Mgene>1) {
fprintf(fout,"Note: Branch length is defined for the first gene (site partition).\n");
fprintf(fout,"For other genes, look at \"rates for genes\".\n");
}
/* if (com.clock) SetBranch (x); */
if(com.clock && com.nbtype>1)
fputs("\nWarning: branch rates are not yet applied in tree length and branch lengths",fout);
if(AbsoluteRate)
fputs("\nNote: mutation rate is not applied to tree length. Tree has times, for TreeView",fout);
for(i=0,tl=0; i<tree.nnode; i++)
if(i!=tree.root) tl += nodes[i].branch;
fprintf(fout,"\ntree length = %9.5f%s\n",tl,com.ngene>1?" (1st gene)":"");
#ifdef NSSITESBandits
if(com.NSsites==0) {
for(i=com.ntime; i<com.np; i++) fprintf(frst1,"\t%.3f", x[i]);
fprintf(frst1,"\t%.2f\t%.3f", tl, -lnL);
fM0tree=(FILE*)gfopen("M0tree", (insmodel==0?"w":"a"));
fprintf(fM0tree, "%d %d\n", com.ns, 1);
OutTreeN(fM0tree,1,1); FPN(fM0tree);
fclose(fM0tree);
}
else {
for(i=com.ntime; i<com.np; i++) fprintf(frst1,"\t%.3f",x[i]);
fprintf(frst1,"\t%.3f",-lnL);
}
#else
for(i=0; i<com.np; i++) fprintf(frst1,"\t%.3f",x[i]);
fprintf(frst1,"\t%.3f", -lnL);
/*
fprintf(frst1,"\t%.4f", (com.ns==2 ? x[0]*2 : 0));
for(i=0; i<com.nkappa; i++) fprintf(frst1,"\t%.3f",x[com.ntime+i]);
fprintf(frst1,"\t%.4f", com.omega);
fprintf(frst1,"\t%.3f", -lnL);
*/
#endif
FPN(fout); OutTreeN(fout,0,1); FPN(fout);
FPN(fout); OutTreeN(fout,1,1); FPN(fout);
if(com.clock) {
FPN(fout); OutTreeN(fout,1,PrNodeNum); FPN(fout);
}
if(com.np-com.ntime || com.clock)
DetailOutput(fout,x, H);
if (com.seqtype==AAseq && com.model>=REVaa_0)
EigenQaa(fout, Root, U, V, x+com.ntime+com.nrgene);
if (com.NSsites)
lfunNSsites_rate(frst,x,np);
if (com.print) {
if(com.rho==0 && com.nparK==0 && com.clock<=1)
AncestralSeqs(frst,x);
if(!com.NSsites && com.plfun!=lfun)
lfunRates(frate,x,np);
}
com.print -= 9;
lnL = com.plfun(x,np);
com.print += 9;
fflush(fout); fflush(flnf); fflush(frst); fflush(frst1);
} /* for(itree) */
fclose(ftree);
if(frate) fclose(frate);
if (com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses
&& (com.seqtype==CODONseq||com.model==FromCodon))
printf("\n%s, %s.\n", com.daafile, (com.aaDist>0 ? "geometric" : "linear"));
if(com.seqtype==1 && com.aaDist>=FIT1) free(pcodonClass);
if(ntree==-1) ntree=itree;
if(ntree>1) {
rewind(flnf);
rell(flnf, fout, ntree);
}
fclose(flnf);
return (0);
}
double *PointKappa (double xcom[], int igene)
{
/* Returns a pointer to the kappa (ts/tv) parameter(s) for gene igene.
   With fix_kappa the global com.kappa is returned directly; otherwise the
   pointer goes into xcom[], skipping the gene rates and, for Mgene>=3
   models, the kappa & omega blocks of all preceding genes.
*/
int nkappa = (com.hkyREV ? 5 : 1);
int nomega = (com.aaDist==AAClasses ? com.nOmegaType : 1);
int offset = com.nrgene;
if(com.fix_kappa) return(&com.kappa);
if(com.Mgene>=3)
offset += igene*(nkappa + nomega);
return(xcom+offset);
}
double *PointOmega (double xcom[], int igene, int inode, int isiteclass)
{
/* This points to the omega parameters in xcom[], by looking at com.model,
com.NSsites and igene. This sometimes points to com.omega or com.rK[].
This is called by SetParameters(), DetailOutput(), etc.
Difficulties in using this with lfunt() etc.
Trying to remove global variables com.pomega and com.pkappa through
PointOmega and PointKappa, but was unsuccessful when too many changes were
made at the same time. Perhaps look at this later. Note that some
variables are passed over the head from lfunt() etc. to EigenQcodon().
Ziheng Notes: 8 August 2003.
*/
/* k is an offset into xcom[] past the gene rates and kappa parameters;
   each branch below adjusts it (or returns a global) for the active model. */
int k = com.nrgene+com.nkappa, backfore;
int nka=(com.hkyREV?5:1), nw=(com.aaDist==AAClasses?com.nOmegaType:1);
if (com.seqtype!=CODONseq && com.model!=FromCodon)
error2("should not be here.");
if(com.NSsites==0 && com.model==0) { /* simple case: one ratio */
if(com.ngene<=1) {
if(com.fix_omega) return (&com.omega_fix); /* fix_omega */
else ; /* deliberate no-op: k already points at the single omega */
}
else if(com.Mgene>=3)
k += igene*(nka + nw) + nka;
}
else if(com.NSsites==0 && com.model) { /* branch model */
if (com.aaDist==0) {
if(com.fix_omega && nodes[inode].label==com.nbtype-1)
return (&com.omega_fix);
else k += (int)nodes[inode].label;
}
else if(com.aaDist==AAClasses)
k += (int)nodes[inode].label*com.nOmegaType;
}
else if (com.NSsites && com.model==0) { /* site model */
/* skip the ncatG-1 class-frequency parameters, then index by site class */
if(com.aaDist<10)
k += com.ncatG-1+2*isiteclass;
else if(com.aaDist==FIT1)
k += com.ncatG-1+4*isiteclass;
else if(com.aaDist==FIT2)
k += com.ncatG-1+5*isiteclass;
else
return (&com.rK[isiteclass]);
}
else if (com.NSsites && com.model<=NSbranch2) { /* branch&site models A&B */
k += 2; /* skip the frequencies. */
backfore = (int)nodes[inode].label;
if(isiteclass<2)
return(&com.rK[isiteclass]);
else if(isiteclass==2) {
if(com.fix_omega && backfore)
return(&com.omega_fix);
else
k += 2 + (com.NSsites==NSpselection?0:2) + backfore;
}
}
else { /* NSbranch3: Clade models C and D */
k += com.ncatG-1; /* skip the frequencies. */
backfore = (int)nodes[inode].label;
if(isiteclass<com.ncatG-1)
return(&com.rK[isiteclass]);
else if(isiteclass == com.ncatG-1) {
if(com.fix_omega && backfore==com.nbtype-1)
return(&com.omega_fix);
else
k += 2 + (com.NSsites==NSpselection?0:2) + backfore;
}
}
return (xcom+k);
}
int sortwM3(double x[])
{
/* Sorts the site-class rates com.rK[] (and their matching frequencies
   com.freqK[]) into order for the NSsites = NSdiscrete model, then writes
   the sorted frequencies and rates back into the variable vector x[].
   Assumes com.freqK[] and com.rK[] have already been initialized.
   Returns 0 on success, -1 for the unimplemented HMM (nparK) case.
*/
int i, order[NCATG];
int k = com.ntime+com.nrgene+com.nkappa+com.npi;
double buf[NCATG];
if(com.NSsites != NSdiscrete) error2("sortwM3");
if(fabs(1 - sum(com.freqK, com.ncatG)) > 1e-6) error2("sortwM3: freqK");
if(com.nparK) {
puts("\asortwM3 for HMM not implemented yet..");
return(-1);
}
indexing(com.rK, com.ncatG, order, 0, (int*)buf);
/* permute rK and freqK in parallel through the scratch buffer */
xtoy(com.rK, buf, com.ncatG);
for(i=0; i<com.ncatG; i++) com.rK[i] = buf[order[i]];
xtoy(com.freqK, buf, com.ncatG);
for(i=0; i<com.ncatG; i++) com.freqK[i] = buf[order[i]];
/* write back: ncatG-1 free frequencies, then all ncatG rates */
for(i=0; i<com.ncatG-1; i++) x[k+i] = com.freqK[i];
for(i=0; i<com.ncatG; i++) x[k+com.ncatG-1+i] = com.rK[i];
return(0);
}
void printParametersNSsites (FILE* fout, double x[])
{
/* Prints the estimated site-class proportions and dN/dS (w) ratios for the
   NSsites models: plain site models (model==0), branch-site models A & B
   (model<=NSbranch2), and clade models C & D (model==NSbranch3).
   k indexes into x[] past the branch lengths, gene rates, kappa, and
   frequency parameters.
*/
int i,j, k=com.ntime+com.nrgene+com.nkappa+com.npi;
double w[NBTYPE][3];
if(!com.NSsites) error2("should not be here");
fprintf(fout,"\n\ndN/dS (w) for site classes (K=%d)\n",com.ncatG);
if(com.model==0) {
fputs("\np: ",fout); for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.freqK[i]);
fputs("\nw: ",fout); for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.rK[i]);
i = com.ncatG-1;
if(com.freqK[i] < 1e-5 && com.rK[i] > 1)
fprintf(fout,"\n(note that p[%d] is zero)\n", i);
}
else if(com.model<=NSbranch2) {
/* branch-site models A & B: classes 0, 1, 2a, 2b; the foreground w for
   the last two classes is the final element of x[] (or omega_fix). */
fprintf(fout,"\nsite class 0 1 2a 2b");
fprintf(fout,"\nproportion ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.freqK[i]);
fprintf(fout,"\nbackground w ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.rK[i%2]);
fprintf(fout,"\nforeground w ");
for(i=0; i<com.ncatG-2; i++) fprintf(fout," %8.5f", com.rK[i%2]);
for(i=0; i<2; i++) fprintf(fout," %8.5f", (com.fix_omega?com.omega_fix:x[com.np-1]));
if(com.freqK[2] < 1e-5 && com.rK[2] > 1)
fprintf(fout, "\n(note that p[2] is zero)\n");
}
else if (com.model==NSbranch3) {
/* clade models: one w per branch type for the last class; the last
   branch type may use the fixed omega.  NOTE(review): w[][] has room
   for 3 classes only — appears to rely on ncatG being 2 or 3 here. */
k += com.ncatG-1 + (com.NSsites==3 && com.ncatG>2) + 1; /* freqs & w0 & w1 */
for(i=0; i<com.nbtype; i++) {
for(j=0; j<com.ncatG-1; j++)
w[i][j] = com.rK[j];
w[i][com.ncatG-1] = (i==com.nbtype-1 && com.fix_omega ? com.omega_fix : x[k++]);
}
fprintf(fout,"\nsite class ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %9d", i);
fprintf(fout,"\nproportion ");
for(i=0; i<com.ncatG; i++) fprintf(fout, " %9.5f", com.freqK[i]);
for(i=0; i<com.nbtype; i++) {
fprintf(fout,"\nbranch type %d: ", i);
for(j=0; j<com.ncatG; j++) fprintf(fout," %9.5f", w[i][j]);
}
i = com.ncatG-1;
if(com.freqK[i] < 1e-5) fprintf(fout,"\n(note that p[%d] is zero)\n", i);
}
fprintf(fout, "\n");
}
/* index of the reference amino acid pair in a flattened 20x20 matrix */
static int ijAAref=19*20+9;
/* reference aa pair: VI (for REVaa, REVaa_0, AAClasses to estimate Grantham)
The rate for this pair is set to 1, and other rates are relative to it.
*/
/* E1N(m,s): normal-distribution expectation term, combining the density at 1
   with the upper tail beyond 1 for a normal(m, s^2) variable.
   NOTE(review): presumably used by the normal-mixture NSsites models
   (NS02normal/NS3normal) — usage not visible here, confirm at call sites. */
#define E1N(m,s) (s/sqrt(PI*2)*exp(-square((1-m)/s)/2)+m*(1-CDFNormal((1-m)/s)))
void DetailOutput (FILE *fout, double x[], double var[])
{
/* var[] is used for codon models if com.getSE=1 to calculate the variances
of dS and dN.
Walks the parameter vector x[] (offset k) and prints each parameter group
in turn: divergence times/rates, gene rates, kappa, codon frequency
parameters, omega / site-class distributions, alpha & rho, and finally
per-branch t, N, S, dN, dS with dS and dN trees for branch models.
NOTE(review): ibtype, phi1, phi2, mu[] and sig[] are declared but not used
anywhere in this function.
*/
int i,j,k=com.ntime, np=com.np,npclass, ibtype;
double om=-1,N=-1,S=0,dN=0,dS=0,dSt,dNt, mr=0, vtw[4],vSN[4], omclass[NCATG];
double phi1=0,phi2=0, t, *tdSdNb=NULL, y;
double mu[3]={0,1,2},sig[3]={-1}; /* 3normal: mu0=0 fixed. mu2 estimated */
double fb3x4[12];
fprintf(fout,"\nDetailed output identifying parameters\n");
if(com.clock) OutputTimesRates(fout, x, var);
k = com.ntime;
if (com.nrgene) {
fprintf (fout, "\nrates for %d genes:%6.0f", com.ngene, 1.);
for(i=0; i<com.nrgene; i++)
fprintf (fout, " %8.5f", x[k++]);
FPN(fout);
}
/* codon models: kappa (or the 5 REV rates), then frequency parameters */
if (com.seqtype==CODONseq || com.model==FromCodon) {
if (com.hkyREV) {
fprintf(fout,"a (TC) & b (TA) & c (TG) & d (CA) & e (CG): ");
FOR(i,5) fprintf(fout,"%8.5f ", x[k++]); FPN(fout);
}
else if (!com.fix_kappa && com.Mgene<=2)
fprintf(fout,"\nkappa (ts/tv) = %8.5f\n", x[k++]);
if(com.npi) {
if (com.codonf==F1x4 || com.codonf==F1x4MG || com.codonf>=FMutSel0) {
/* 3 free base frequencies, renormalized so the 4 sum to 1 */
for(j=0,fb3x4[3]=1; j<3; j++) fb3x4[j] = x[k+j];
abyx(1/sum(fb3x4,4), fb3x4, 4);
fprintf(fout, "\nFrequency parameters:\n");
for(j=0;j<4;j++)
fprintf(fout, " %9.5f (%c)", fb3x4[j], BASEs[j]);
if(com.codonf==FMutSel)
for(j=0;j<4;j++)
fprintf(frst1, "\t%.4f", fb3x4[j]);
}
else if (com.codonf==F3x4 || com.codonf==F3x4MG) {
/* 3 free base frequencies per codon position */
for(j=0;j<3;j++) {
xtoy(x+k+j*3, fb3x4+j*4, 3);
fb3x4[j*4+3] = 1;
abyx(1/sum(fb3x4+j*4,4), fb3x4+j*4, 4);
}
fprintf(fout, "\nCodon frequency model: %s", codonfreqs[com.codonf]);
fprintf(fout, "\nFrequency parameters:\n");
for(i=0; i<3; i++,FPN(fout)) {
fprintf(fout, "Position %d: ", i+1);
for(j=0;j<4;j++)
fprintf(fout, " %9.5f (%c)", fb3x4[i*4+j], BASEs[j]);
}
}
if(com.npi>3 || com.codonf!=FMutSel) {
fprintf(fout, "\nEquilibrium codon frequencies (evolver-style):\n");
for(j=0; j<64; j++) {
fprintf(fout," %11.8f", GeneticCode[com.icode][j]==-1?0:com.pi[FROM64[j]]);
if((j+1)%4==0) FPN(fout);
}
}
if(com.npi>3 && com.codonf>=FMutSel0) {
if(com.codonf==FMutSel0) {
fprintf(fout, "\nEquilibrium amino acid frequencies:\n");
for(j=0; j<20; j++) {
fprintf(fout," %11.8f", com.piAA[j]);
if((j+1)%10==0) FPN(fout);
}
fprintf(fout, "\nfitness for %d codons (amino acid %c has fitness 0)\n", com.ncode, AAs[19]);
/* shift fitnesses so the reference amino acid has fitness 0 */
i = GeneticCode[com.icode][FROM61[com.ncode-1]];
y = (i == 19 ? 0 : com.ppi[3+i]);
for(j=0; j<com.ncode; j++) {
i = GeneticCode[com.icode][FROM61[j]];
fprintf(fout," %9.6f", (i == 19 ? 0 : com.ppi[3+i])-y);
}
}
else {
fprintf(fout, "\nfitness for %d codons (GGG has fitness 0)\n", com.ncode-1);
for(j=0; j<com.ncode-1; j++)
fprintf(fout," %9.6f", com.ppi[3+j]);
}
FPN(fout);
}
k += com.npi;
if(com.codonf == FMutSel)
SelectionCoefficients(frst, com.pkappa, com.ppi, com.omega);
}
/* dN/dS by averaging over site classes.
Qfactor_NS was calculated during ML iteration and is used here..
*/
if(com.NSsites && com.model==0) {
for(j=0,dS=dN=0; j<com.ncatG; j++) {
if(com.aaDist) {
if(com.aaDist<10)
com.pomega = x+k+com.ncatG-1+2*j;
else if(com.aaDist >= FIT1) {
com.pomega = x+k+com.ncatG-1+j*(4+(com.aaDist==FIT2));
xtoy(pcodonClass+j*64, com.pi, com.ncode);
}
}
mr = -1;
EigenQcodon(2,1,&S,&dSt,&dNt,NULL,NULL,NULL, &mr, com.pkappa,com.rK[j],PMat);
/* t=1 used here, and dS & dN used later for each branch */
dS += com.freqK[j]*dSt;
dN += com.freqK[j]*dNt;
omclass[j] = dNt/dSt;
}
om = dN/dS;
dS *= Qfactor_NS;
dN *= Qfactor_NS;
N = com.ls*3 - S;
}
if(!com.fix_omega && com.NSsites==0 && com.model==0 && com.aaDist!=7 && com.Mgene<=2)
fprintf(fout,"\nomega (dN/dS) = %8.5f\n", x[k++]);
/* dN/dS rate ratios for classes */
if (com.NSsites >= NSgamma) {
fprintf(fout,"\nParameters in M%d (%s):\n ", com.NSsites, NSsitesmodels[com.NSsites]);
if(com.NSsites == NSgamma)
fprintf(fout," a=%9.5f b=%9.5f\n",x[k],x[k+1]);
else if(com.NSsites == NS2gamma)
/* b1 printed equal to a1 (so the second gamma has mean 1) */
fprintf(fout," p0 = %9.5f a0 = %9.5f b0 = %9.5f\n(p1 = %9.5f) a1 = %9.5f (b1 = %9.5f)\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+3]);
else if(com.NSsites == NSbeta)
fprintf(fout,"p = %9.5f q = %9.5f\n",x[k],x[k+1]);
else if(com.NSsites == NSbetaw)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n (p1 = %9.5f) w = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], (com.fix_omega?com.omega:x[k+3]));
else if(com.NSsites == NSbetagamma)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) a = %9.5f b = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NSbeta1gamma)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) a = %9.5f b = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NSbeta1normal)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) u = %9.5f s = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NS02normal)
fprintf(fout,"p0 = %9.5f p1 = %9.5f u2 = %9.5f s1 = %9.5f s2 = %9.5f\n",
x[k],x[k+1],x[k+2],x[k+3],x[k+4]);
else if(com.NSsites == NS3normal)
fprintf(fout,"p0 = %9.5f p1 = %9.5f (p2 = %9.5f)\n u2 = %9.5f s0 = %9.5f s1 = %9.5f s2 = %9.5f\n",
x[k],x[k+1], 1-x[k]-x[k+1], x[k+2],x[k+3],x[k+4],x[k+5]);
else if(com.NSsites == NSTgamma)
fprintf(fout,"alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],x[k+1],(com.fix_omega ? com.omega_fix : x[k+2]));
else if(com.NSsites == NSTinvgamma)
fprintf(fout,"alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],x[k+1],(com.fix_omega ? com.omega_fix : x[k+2]));
else if(com.NSsites == NSTgamma1)
fprintf(fout,"p0 = %9.5f (p1 = %9.5f) alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],1-x[k],x[k+1],x[k+2],(com.fix_omega ? com.omega_fix : x[k+3]));
else if(com.NSsites==NSTinvgamma1)
fprintf(fout,"p0 = %9.5f (p1 = %9.5f) alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],1-x[k],x[k+1],x[k+2],(com.fix_omega ? com.omega_fix : x[k+3]));
}
if (com.NSsites==NSdiscrete && com.aaDist) { /* structural site classes */
npclass=(com.aaDist<10 ? 2 : (com.aaDist==FIT1?4:5));
fprintf(fout,"\nParameters in each class (%d)",npclass);
fprintf(fout,"%s:\n\n",
(com.aaDist<10 ? "(b, a)" : "(a_p, p*, a_v, v*, b)"));
for(j=0,k+=com.ncatG-1; j<com.ncatG; j++,FPN(fout)) {
fprintf(fout,"%d: f=%8.5f, ",j+1,com.freqK[j]);
FOR(i,npclass) fprintf(fout,"%9.5f",x[k++]);
fprintf(fout," dN/dS = %7.5f", omclass[j]);
}
}
else if (com.NSsites && com.aaDist==0) {
printParametersNSsites(fout,x);
if (com.nparK) {
fprintf(fout,"\nTransition matrix M in HMM: M_ij=Prob(i->j):\n");
matout(fout, com.MK, com.ncatG, com.ncatG);
}
}
else if(com.aaDist && com.aaDist<=6) { /* one class (YNH98, Genetics) */
k = com.ntime+com.nrgene+com.nkappa+com.npi;
fprintf (fout,"\nb = %9.5f", x[k++]);
if (com.seqtype==CODONseq) fprintf (fout,"\na = %9.5f\n", x[k++]);
}
else if(com.aaDist && com.aaDist>=11) { /* fitness, one class */
fprintf (fout,"\nfitness model (a_p, p*, a_v, v*, (and w0 for FIT2):\n");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
FOR(i,4+(com.aaDist==FIT2)) fprintf(fout," %9.5f",x[k++]); FPN(fout);
}
else if(com.model==0 && com.NSsites==0 && !com.fix_omega && com.Mgene>2) {
/* per-gene kappa and/or omega under Mgene>2 */
if(!com.fix_kappa && !com.fix_omega) {
for(i=0; i<com.ngene; i++,k+=2)
fprintf(fout,"\ngene #%2d: kappa = %9.5f omega = %9.5f", i+1, x[k], x[k+1]);
}
else if(com.fix_kappa) {
for(i=0; i<com.ngene; i++,k++)
fprintf(fout,"\ngene #%2d: omega = %9.5f", i+1, x[k]);
}
else if(com.fix_omega) {
for(i=0; i<com.ngene; i++,k++)
fprintf(fout,"\ngene #%2d: kappa = %9.5f", i+1, x[k]);
}
}
}
else
k += com.nrate;
/* gamma rate variation (alpha) and, if nparK, autocorrelation (rho) */
for(j=0; j<com.nalpha; j++) {
if (!com.fix_alpha)
fprintf(fout,"\nalpha (gamma, K = %d) = %8.5f", com.ncatG,(com.alpha=x[k++]));
if(com.nalpha>1)
DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);
fprintf(fout,"\nrate: "); FOR(i,com.ncatG) fprintf(fout," %8.5f",com.rK[i]);
fprintf(fout,"\nfreq: "); FOR(i,com.ncatG) fprintf(fout," %8.5f",com.freqK[i]);
}
if (com.rho) {
if (!com.fix_rho) fprintf (fout, "rho (correlation) = %8.5f\n", x[k]);
fprintf (fout, "transition probabilities between rate categories:\n");
for(i=0;i<com.ncatG;i++,FPN(fout)) FOR(j,com.ncatG)
fprintf(fout," %8.5f",com.MK[i*com.ncatG+j]);
}
if (com.aaDist==AAClasses) {
if(com.model==0) {
fprintf (fout, "\nw (dN/dS) classes for amino acid pairs:\n");
for(k=0; k<com.nOmegaType; k++) {
fprintf (fout, " %9.5f: ", x[com.ntime+com.nrgene+com.nkappa+k]);
for(i=0; i<20; i++) for(j=0; j<i; j++)
if (OmegaAA[i*(i-1)/2+j]==k) fprintf(fout," %c%c", AAs[i],AAs[j]);
if (k==0) fprintf(fout, " (background ratio)");
FPN(fout);
}
/* output for bubble plot */
if(com.seqtype==1) {
for(i=0; i<20; i++) for(j=0; j<i; j++) {
y = 0;
if(AA1STEP[i*(i-1)/2+j])
y = x[com.ntime+com.nrgene+com.nkappa + OmegaAA[i*(i-1)/2+j]]; /* omega */
fprintf(frst, "%c%c %3d %3d %8.5f\n", AAs[i], AAs[j], i+1, j+1, y);
}
}
}
else {
fprintf (fout, "\nw (dN/dS) for branch-type and amino acid class:\n");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
for(i=0; i<com.nbtype; i++) {
fprintf(fout, "Branch type %d: ", i);
for(j=0; j<com.nOmegaType; j++) {
fprintf (fout, " %9.5f", x[k++]);
}
FPN(fout);
}
}
}
/* dN & dS for each branch in the tree */
if(com.seqtype==CODONseq && com.ngene==1 && (com.model==0 || com.NSsites==0)
/*||com.model==FromCodon||com.aaDist==AAClasses */){
/* tdSdNb holds t, dS, dN for each branch (3 rows of tree.nnode) so the
   dS and dN trees can be printed and branch lengths restored afterwards */
tdSdNb = (double*)malloc(tree.nnode*3*sizeof(double));
if(tdSdNb==NULL) error2("oom DetailOutput");
if(com.model && com.aaDist!=AAClasses ) { /* branch models */
fprintf(fout, "\nw (dN/dS) for branches: ");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
for(i=0; i<com.nOmega-1; i++)
fprintf(fout, " %7.5f", x[k+i]);
fprintf(fout, " %7.5f", (com.fix_omega ? com.omega : x[k+i]));
FPN(fout);
}
fputs("\ndN & dS for each branch\n\n",fout);
fprintf(fout,"%7s%11s%8s%8s%8s%8s%8s %5s %5s\n\n",
"branch","t","N","S","dN/dS","dN","dS","N*dN","S*dS");
for(i=0,dNt=dSt=0; i<tree.nbranch; i++) {
fprintf(fout,"%4d..%-3d ",tree.branches[i][0]+1,tree.branches[i][1]+1);
k = com.ntime+com.nrgene+com.nkappa+com.npi;
/* if(com.codonf >= FMutSel0)
com.ppi = x+com.ntime+com.nrgene+com.nkappa;
*/
t = nodes[tree.branches[i][1]].branch;
if(com.NSsites==0) {
if (com.aaDist) om=-1; /* not used in EigenQcodon() */
else if (com.model==0 || com.model==FromCodon)
om = (com.fix_omega?com.omega:x[k]);
else if (com.model==NSbranchB) om = x[k+i];
else if (com.model==NSbranch2) om = nodes[tree.branches[i][1]].omega;
if(com.model && com.aaDist)
com.pomega = x + com.ntime + com.nrgene + !com.fix_kappa + com.npi
+ (int)nodes[tree.branches[i][1]].label*com.nOmegaType;
mr = 0;
EigenQcodon(2,t,&S,&dS,&dN, NULL,NULL,NULL, &mr, com.pkappa,om,PMat); /* PMat destroyed! */
dNt += dN;
dSt += dS;
if (com.aaDist) om = dN/dS;
/*
if(dS<.01/com.ls) om = -1;
else if(om==-1) om = dN/dS;
if(com.model==0) om = com.omega;
*/
N = com.ls*3-S;
if(com.model) {
tdSdNb[i] = t;
tdSdNb[tree.nnode+i] = dS;
tdSdNb[tree.nnode*2+i] = dN;
}
fprintf(fout," %7.3f %7.1f %7.1f %7.4f %7.4f %7.4f %5.1f %5.1f",
t,N,S,om,dN,dS,N*dN,S*dS);
/* fprintf(frst,"%8.1f%8.1f %9.5f%9.4f%9.4f",N,S,om,dN,dS); */
/* om not used in AAClasses model */
if(com.getSE>1&&com.fix_blength<2&&!com.clock&&com.aaDist!=AAClasses){
/* delta-method SEs for dS & dN from the t & w variances in var[] */
vtw[0] = var[i*np+i];
vtw[3] = var[k*np+k];
vtw[1] = vtw[2] = var[i*np+k];
VariancedSdN(t, om, vtw, vSN);
fprintf(fout," dN = %7.4f +- %.4f dS = %7.4f +- %.4f",
dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
fprintf(fout," (method 2)");
}
FPN(fout);
}
else if(com.model==0) { /* NSsites & other site-class models */
fprintf(fout,"%9.3f %8.1f %8.1f %8.4f %8.4f %8.4f %6.1f %6.1f\n",
t,N,S,om,dN*t,dS*t, N*dN*t,S*dS*t);
}
else { /* NSbranchsites models */
;
}
} /* for (i) */
if(com.NSsites==0) {
fprintf(fout,"\ntree length for dN: %12.4f\ntree length for dS: %12.4f\n", dNt,dSt);
fprintf(frst1,"\t%.4f\t%.4f", dNt, dSt);
}
if(com.model && com.NSsites==0) {
fprintf(fout,"\ndS tree:\n");
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[tree.nnode+i];
OutTreeN(fout,1,1);
fprintf(fout,"\ndN tree:\n");
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[tree.nnode*2+i];
OutTreeN(fout,1,1); FPN(fout);
/* revert branch lengths to the original values */
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[i];
free(tdSdNb);
/* the first label is the label assigned in the tree file. The second is w ratio */
if(com.aaDist==0) {
fprintf(fout,"\nw ratios as labels for TreeView:\n");
OutTreeN(fout, 1, PrOmega); FPN(fout);
}
}
} /* if codonseqs */
FPN(fout); fflush(fout);
}
void ReadNSsitesModels(char *line)
{
/* This reads the line NSsites = 0 1 2 3 7 8 in codeml.ctl.
*/
char *pline;
int pop_digit;
if ((pline=strstr(line, "="))==NULL) error2(".ctl file error NSsites");
pline++;
for (nnsmodels=0; nnsmodels<maxNSsitesModels; nnsmodels++) {
if(sscanf(pline, "%d", &nsmodels[nnsmodels]) != 1) break;
for(pop_digit=0; ; ) {
if(isdigit(*pline)) { pline++; pop_digit=1; }
else if(isspace(*pline)) {
pline++;
if(pop_digit) break;
}
else error2(".ctl file NSsites line strange.");
}
if(nsmodels[nnsmodels]<0 || nsmodels[nnsmodels]>=maxNSsitesModels)
error2("NSsites model");
}
com.NSsites=nsmodels[0];
}
int ReadDaafiles(char *line)
{
/* Reads one amino acid rate (.daa) file name per gene from line, used to
   set up the eigen matrices for combined clock analyses of multiple
   protein data sets (clock = 5 or 6).  Stops at the first field that does
   not begin with an alphanumeric character.
*/
int markgenes[NGENE];
int ngenes = (com.ndata > 1 ? com.ndata : NGENE);
int ig;
splitline(line, markgenes);
for (ig = 0; ig < ngenes; ig++) {
char *field = line + markgenes[ig];
if (!isalnum(*field)) break;
sscanf(field, "%s", data.daafile[ig]);
printf("protein %2d uses %s\n", ig+1, data.daafile[ig]);
}
return(0);
}
int GetOptions (char *ctlf)
{
/* Reads the control file ctlf (codeml.ctl), matching each "name = value"
   line against optstr[] and filling the corresponding com.* field, then
   validates and reconciles the option combinations (setting com.nrate,
   com.npi, com.ncatG etc. for the chosen model).  Returns 0; exits via
   error2()/exit() on invalid input.
*/
int iopt, i,j, nopt=37, lline=255;
char line[255], *pline, opt[99], *comment="*#";
char *optstr[] = {"seqfile", "outfile", "treefile", "seqtype", "noisy",
"cleandata", "runmode", "method",
"clock", "TipDate", "getSE", "RateAncestor", "CodonFreq", "estFreq", "verbose",
"model", "hkyREV", "aaDist","aaRatefile",
"NSsites", "NShmm", "icode", "Mgene", "fix_kappa", "kappa",
"fix_omega", "omega", "fix_alpha", "alpha","Malpha", "ncatG",
"fix_rho", "rho", "ndata", "bootstrap", "Small_Diff", "fix_blength"};
double t;
FILE *fctl;
int ng=-1, markgenes[NGENE+99];
char *daafiles[]={"", "grantham.dat", "miyata.dat",
"g1974c.dat","g1974p.dat","g1974v.dat","g1974a.dat"};
fctl=gfopen(ctlf,"r");
if (noisy) printf ("\n\nReading options from %s..\n", ctlf);
for (;;) {
if (fgets(line, lline, fctl) == NULL) break;
/* skip lines that hold no alphanumeric character before a comment char */
for (i=0,t=0,pline=line; i<lline&&line[i]; i++)
if (isalnum(line[i])) { t=1; break; }
else if (strchr(comment,line[i])) break;
if (t==0) continue;
sscanf (line, "%s%*s%lf", opt,&t);
if ((pline=strstr(line, "="))==NULL)
error2("err: option file. add space around the equal sign?");
/* match the option name (first 8 chars) against optstr[] */
for (iopt=0; iopt<nopt; iopt++) {
if (strncmp(opt, optstr[iopt], 8)==0) {
if (noisy>=9)
printf ("\n%3d %15s | %-20s %6.2f", iopt+1,optstr[iopt],opt,t);
switch (iopt) {
case ( 0): sscanf(pline+1, "%s", com.seqf); break;
case ( 1): sscanf(pline+1, "%s", com.outf); break;
case ( 2): sscanf(pline+1, "%s", com.treef); break;
case ( 3): com.seqtype=(int)t; break;
case ( 4): noisy=(int)t; break;
case ( 5): com.cleandata=(char)t; break;
case ( 6): com.runmode=(int)t; break;
case ( 7): com.method=(int)t; break;
case ( 8): com.clock=(int)t; break;
case ( 9):
sscanf(pline+1, "%lf%lf", &com.TipDate, &com.TipDate_TimeUnit);
break;
case (10): com.getSE=(int)t; break;
/* RateAncestor is stored in com.print */
case (11): com.print=(int)t; break;
case (12): com.codonf=(int)t; break;
/* estFreq is stored in com.npi */
case (13): com.npi=(int)t; break;
case (14): com.verbose=(int)t; break;
case (15): com.model=(int)t; break;
case (16): com.hkyREV=(int)t; break;
case (17): com.aaDist=(int)t; break;
case (18):
sscanf(pline+2,"%s",com.daafile);
if(com.seqtype==2 && com.ndata>1 && (com.clock==5 || com.clock==6)) {
ReadDaafiles(pline+2);
break;
}
break;
case (19): ReadNSsitesModels(line); break;
/* NShmm is stored in com.nparK */
case (20): com.nparK=(int)t; break;
case (21):
com.icode=(int)t;
if(com.seqtype==1 && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%d",&data.icode[j])) break;
for(j=0; j<min2(ng,com.ndata); j++) printf("%4d", data.icode[j]); FPN(F0);
}
break;
case (22): com.Mgene=(int)t; break;
case (23): com.fix_kappa=(int)t; break;
case (24):
com.kappa=t;
/* for combined analyses (clock 5/6) a per-gene kappa list may follow */
if(com.seqtype==1 && com.fix_kappa && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%lf",&data.kappa[j])) break;
matout(F0, data.kappa, 1, min2(ng,com.ndata));
}
break;
case (25): com.fix_omega=(int)t; break;
case (26):
com.omega=t;
if(com.seqtype==1 && com.fix_omega && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%lf",&data.omega[j])) break;
matout(F0, data.omega, 1, min2(ng,com.ndata));
}
break;
case (27): com.fix_alpha=(int)t; break;
case (28):
com.alpha=t;
if(com.fix_alpha && t && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j], "%lf", &data.alpha[j])) break;
matout(F0, data.alpha, 1, min2(ng,com.ndata));
}
break;
/* Malpha is stored in com.nalpha */
case (29): com.nalpha=(int)t; break;
case (30): com.ncatG=(int)t; break;
case (31): com.fix_rho=(int)t; break;
case (32): com.rho=t; break;
case (33): com.ndata=(int)t; break;
case (34): com.bootstrap=(int)t; break;
case (35): Small_Diff=t; break;
case (36): com.fix_blength=(int)t; break;
}
break;
}
}
if (iopt==nopt)
{ printf ("\noption %s in %s not recognised\n", opt,ctlf); exit(-1); }
}
fclose (fctl);
if((com.fix_kappa || (com.fix_alpha&&com.alpha)) && (com.clock==5 || com.clock==6))
printf("Using parameters from the control file.");
if (noisy) FPN(F0);
/* ---- option validation & derived settings ---- */
if(com.seqtype==1 || com.model==FromCodon)
setmark_61_64 ();
if (com.seqtype==AAseq || com.seqtype==CODON2AAseq) {
if(com.NSsites) error2("use NSsites=0 for amino acids?");
if(com.hkyREV && com.model!=FromCodon) /* REV & FromCodon not well-tested. */
error2("use hkyREV=0 for amino acids?");
com.ncode = 20;
if(com.aaDist==AAClasses)
com.nrate = com.nkappa=(com.hkyREV ? 5 : !com.fix_kappa);
switch (com.model) {
case (Poisson): case (EqualInput): case (Empirical): case (Empirical_F):
com.fix_kappa=1; com.kappa=0; com.nrate=0; break;
case (FromCodon):
com.nrate=com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
if(com.aaDist) com.nrate++;
if(com.fix_omega) error2("fix_omega = 1");
if(com.codonf) {
com.codonf=0; puts("CodonFreq=0 reset for model=6.");
}
break;
case (REVaa_0): com.fix_kappa=0; com.kappa=0; break;
case (REVaa): com.fix_kappa=0; com.kappa=0; com.nrate=189; break;
default: error2("model unavailable");
}
if(com.Mgene>2 || (com.Mgene==2 && (com.model==Fequal||com.model==2)))
error2 ("Mgene && model");
if(com.seqtype==2 && com.model!=FromCodon && com.model!=AAClasses)
{ com.fix_kappa=com.fix_omega=1; com.kappa=com.omega=0; }
}
else if(com.seqtype==CODONseq) {
if(com.nparK)
if (com.model||com.aaDist||com.NSsites!=NSdiscrete||com.alpha||com.rho)
error2("HMM model option");
if(com.Mgene>1 && com.model) error2("Mgene & model?");
if(com.fix_kappa) {
if(com.hkyREV)
error2("can't fix kappa for the codon model you selected.");
else
com.pkappa[0] = com.kappa;
}
if(com.codonf>=FMutSel0 && com.Mgene>=2)
error2("model FMutSel + Mgene not implemented");
if(com.runmode==-2 && com.seqtype==1 && com.npi)
error2("runmode = -2 not implemented for codon models with frequencies");
if(com.hkyREV && (com.aaDist || com.Mgene>1))
error2("hkyREV with aaDist or Mgene: check options?\a");
if(com.NSsites<0 || com.NSsites>maxNSsitesModels || (com.NSsites>13 && com.NSsites<22))
error2("option NSsites.");
if(com.aaDist && com.NSsites)
error2("aaDist & NSsites don't work together");
if((com.model && com.aaDist)
&& (com.model>NSbranch2 || com.aaDist!=AAClasses))
error2("model & aaDist");
if(com.model==NSbranch3 && com.NSsites!=2 && com.NSsites!=3)
error2("clade model should have model = 3 NSsites = 2 or 3.");
if(com.aaDist && com.fix_omega)
error2("can't fix_omega for aaDist models");
com.nrate=com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
/* pi_T, pi_C, pi_A are counted as frequency parameters pi. */
if(com.codonf==0)
com.npi = 0;
if(com.codonf==FMutSel0) /* FMutSel0: pi_TCA plus 20 AA freqs. */
com.npi = 3 + (com.npi ? 20-1 : 0);
else if(com.codonf==FMutSel) /* FMutSel: pi_TCA plus 60 codon freqs. */
com.npi = 3 + (com.npi ? com.ncode-1 : 0);
else if(com.npi) {
if (com.codonf==F1x4 || com.codonf==F1x4MG) com.npi = 3;
else if (com.codonf==F3x4 || com.codonf==F3x4MG) com.npi = 9;
else if (com.codonf==Fcodon) com.npi = com.ncode-1;
}
com.nrate += com.npi;
if (com.aaDist!=AAClasses) {
if(com.fix_kappa>1) error2("fix_kappa>1, not tested."); /** ???? */
if (com.model>0 && (com.alpha || !com.fix_alpha))
error2("dN/dS ratios among branches not implemented for gamma");
if (com.model>0 && com.clock)
error2("model and clock don't work together");
if (com.fix_omega) {
com.omega_fix=com.omega;
if((com.model==0 && com.NSsites==NSdiscrete)
|| (com.model && com.NSsites && com.NSsites!=NSpselection
&&com.NSsites!=NSdiscrete && com.NSsites!=NSbetaw))
error2("\afix_omega?");
}
if (com.model>NSbranch3) error2("seqtype or model.");
/*
if (com.model==NSbranch2 && com.clock==2)
error2("NSbranch & local clock.");
*/
if (com.model==NSbranch3 && com.NSsites==NSpselection && com.ncatG!=3)
{ com.ncatG=3; puts("ncatG=3 reset."); }
if(com.kappa<0) error2("kappa..");
if (com.runmode) com.fix_blength=0;
if(com.runmode==-2 && (com.NSsites||com.alpha||com.aaDist))
error2("wrong model for pairwise comparison.\ncheck NSsites, alpha, aaDist, model etc.");
if(com.runmode>0 && com.model==2) error2("tree search & model");
if(com.aaDist && com.NSsites!=0 && com.NSsites!=NSdiscrete)
error2("NSsites && aaDist.");
if((com.NSsites || nnsmodels>1) && (com.alpha || com.fix_alpha==0))
error2("NSsites & Gamma");
if(com.seqtype==1 && (com.alpha || com.fix_alpha==0))
puts("\aGamma codon model: are you sure this is the model you want to use? ");
if(com.aaDist==0) {
if((!com.fix_omega || (com.Mgene && com.Mgene>=3)) && !com.NSsites)
com.nrate++;
}
else {
if(com.aaDist<=6) com.nrate+=2; /* a & b, PSB2000 */
else if(com.aaDist==FIT1) com.nrate+=4; /* fitness models: */
else if(com.aaDist==FIT2) com.nrate+=5; /* ap, p*, av, v*, b */
if(com.aaDist>=FIT1)
for(i=0; i<2; i++)
for(j=0;j<20;j++) AAchem[i][j] /= AAchem[i][20];
}
/* per-NSsites-model adjustments to ncatG and nrate */
if(com.NSsites) {
if(com.NSsites==NSfreqs && com.ncatG!=5)
{ puts("\nncatG changed to 5."); com.ncatG=5; }
if(com.model && com.NSsites)
if((com.model!=2 && com.model!=3)
|| (com.NSsites!=NSpselection && com.NSsites!=NSdiscrete))
error2("only NSsites=2,3 & model=2,3 are compatible.");
switch(com.NSsites) {
case (NSnneutral): com.ncatG=2; break;
case (NSpselection):
case (NSM2aRel):
com.ncatG=3; break;
case (NSbetaw): com.ncatG++; break;
case (NS02normal): com.ncatG++; break;
}
if(com.model==2) { /* branchsite models A & B */
if(com.ncatG!=3) puts("\abranch-site model: use ncatG=3 only.");
com.ncatG=4;
com.nrate += (com.NSsites==2?2:3);
}
else if(com.model==3) { /* Clade models C & D */
if(com.NSsites==NSpselection) {
com.ncatG=3; com.nrate+=3;
}
if(com.NSsites==NSdiscrete) {
if(com.ncatG!=2 && com.ncatG!=3)
error2("use 2 or 3 for ncatG for model=3?");
com.nrate += com.ncatG+1;
}
}
else if(com.NSsites==NSnneutral) {
if(!com.fix_omega) com.nrate++;
else { com.nrate++; com.omega_fix=com.omega; }
}
else if(com.NSsites==NSpselection || com.NSsites==NSM2aRel) {
if(!com.fix_omega) com.nrate+=2;
else { com.nrate++; com.omega_fix=com.omega; }
}
else if(com.NSsites==NSbetaw)
{ if(!com.fix_omega) com.nrate++; else com.omega_fix=com.omega; }
else if(com.NSsites==NSdiscrete && com.aaDist) {
if (com.aaDist<=6) com.nrate+=com.ncatG; /* a&b PSB2000 */
else { /* fitness models */
com.nrate=!com.fix_kappa+4*com.ncatG;
if(com.aaDist==FIT2) com.nrate+=com.ncatG;
}
}
else if(com.NSsites==NSdiscrete)
com.nrate+=com.ncatG; /* omega's */
else if(com.NSsites==NSTgamma || com.NSsites==NSTinvgamma) {
com.nrate += 2 + !com.fix_omega; com.ncatG=KGaussLegendreRule;
}
else if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1) {
com.nrate += 3+!com.fix_omega; com.ncatG=KGaussLegendreRule+1;
}
}
}
}
else
error2 ("seqtype..");
if(com.runmode==-2 && com.cleandata==0) {
com.cleandata=1;
if(noisy) puts("gaps are removed for pairwise comparison.");
}
if(com.method &&(com.clock||com.rho))
{ com.method=0; puts("Iteration method reset: method = 0"); }
if(com.method && com.seqtype==2 && com.model==FromCodon)
{ com.method=0; puts("\awork on method = 1 for model = 6"); }
if (com.clock && com.fix_blength==2)
error2("can't fix branch lengths under clock model.");
if (com.runmode==3 && (com.clock)) error2("runmode+clock");
if (com.aaDist<=6 && (com.seqtype==CODONseq || com.model==FromCodon))
strcpy(com.daafile, daafiles[abs(com.aaDist)]);
if (com.fix_alpha && com.alpha==0) {
/* note: only the puts() is conditional on com.rho; fix_rho and rho are
   always reset here despite the indentation. */
if (com.rho) puts("rho set to 0."); com.fix_rho=1; com.rho=0;
}
if(!com.fix_alpha && com.alpha<=0)
error2("initial value alpha <= 0 for fix_alpha = 0");
if(!com.fix_rho && com.rho==0) { com.rho=0.001; puts("init rho reset"); }
if(com.alpha||com.NSsites)
{ if(com.ncatG<2 || com.ncatG>NCATG) error2("ncatG"); }
else if (com.ncatG>1) com.ncatG=1;
if(com.ndata<=0) com.ndata=1;
if(com.bootstrap && com.ndata!=1) error2("ndata=1 for bootstrap.");
return(0);
}
int testx (double x[], int np)
{
/* Range check used by nls2 for least-squares branch length estimation,
   called only if(clock==0).  Returns -1 if any of the com.ntime branch
   lengths in x[] falls outside the legal bounds, and 0 otherwise.
   np is part of the nls2 callback interface and is not used here.
*/
   int j;
   double lower=.4e-6, upper=99;

   for (j=0; j<com.ntime; j++) {
      if (x[j] < lower) return (-1);
      if (x[j] > upper) return (-1);
   }
   return (0);
}
int SetxBound (int np, double xb[][2])
{
/* Sets lower/upper bounds xb[i][0..1] for each of the np parameters, in
   the same order as they appear in x[]: divergence times (delegated to
   SetxBoundTimes), relative gene rates, rate parameters (kappa, omega,
   ...), codon frequency parameters, parameters of the w distribution for
   the NSsites models, then alpha('s) and rho.  Always returns 0.
   Fix: removed a duplicated loop that re-assigned the identical default
   rate bounds, and the unused locals tb[] and tb0.
*/
   int i=-1,j,k, K=com.ncatG;
   double rgeneb[]={0.01,99}, rateb[]={1e-4,999};
   double alphab[]={0.02,49}, betab[]={0.005,99}, omegab[]={0.0001,999};
   double rhob[]={0.01,0.99}, pb[]={.00001,.99999};

   SetxBoundTimes (xb);
   /* default: every parameter after the times gets the generic rate
      bounds; the specialized assignments below overwrite these. */
   for(i=com.ntime;i<np;i++) FOR (j,2) xb[i][j]=rateb[j];
   for(i=0; i<com.nrgene; i++) for(j=0;j<2;j++) xb[com.ntime+i][j]=rgeneb[j];
   for(i=0; i<com.nrate; i++) for(j=0;j<2;j++) xb[com.ntime+com.nrgene+i][j]=rateb[j];
   k = com.ntime+com.nrgene+com.nkappa;

   /* codon frequency parameters; j counts how many are already bounded */
   k += j = (com.seqtype==CODONseq && com.codonf>=FMutSel0 ? 3 : 0);
   if(com.seqtype==CODONseq && com.npi>3
      && (com.codonf==Fcodon || com.codonf==FMutSel0 ||com.codonf==FMutSel)) {
      for( ; j<com.npi; j++) {   /* fitness parameters, on the log scale */
         xb[k][0] = -29; xb[k++][1] = 29;
      }
   }

   /* omega parameters or those in the w distribution */
   if (com.NSsites) { /* p's before w's in xb[] */
      omegab[0] *= 0.01;
      switch(com.NSsites) {
      case(NSnneutral):
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];     /* p0 */
         xb[k][0]=omegab[0]; xb[k++][1]=1;     /* w0 < 1 */
         break;
      case(NSpselection): /* for p0, p1, w2 */
      case(NSM2aRel):     /* for p0, p1, w2 */
         FOR(j,2) { xb[k][0]=-99; xb[k++][1]=99; }  /* transformed p */
         xb[k][0]=omegab[0]; xb[k++][1]=1;          /* w0 < 1 */
         if(!com.fix_omega && (com.model==0 || com.model==2)) { /* w2 > 1 */
            xb[k][0] = (com.NSsites==NSpselection ? 1 : omegab[0]);
            xb[k++][1] = omegab[1];
         }
         else if (com.model==3)
            for(j=0; j<1+!com.fix_omega; j++) {
               xb[k][0]=omegab[0]; xb[k++][1]=omegab[1];
            }
         break;
      case(NSdiscrete): /* pK[] & rK[] */
         if(com.model==3) { /* Clade model D */
            if(com.nparK) error2("model & NSsites & nparK");
            FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
            FOR(j,K+1) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
         }
         else if(com.model==2) { /* branch-site model B */
            K=3;
            if(com.nparK==0)
               FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
            FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
            if(com.nparK)  /* transition probabilities of the site HMM */
               FOR(j,K*(K-1)) { xb[k][0]=-99; xb[k++][1]=99; }
         }
         else { /* NSsites models M3 */
            FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
            FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
         }
         if(com.seqtype==CODONseq && com.aaDist)
            FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
         break;
      case(NSfreqs): /* p0...pK */
         FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
         break;
      case(NSgamma):
         FOR(j,2) { xb[k][0]=alphab[0]; xb[k++][1]=alphab[1]; } break;
      case(NS2gamma): /* p0, alpha1,beta1,alpha2=beta2 */
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];
         FOR(j,3) { xb[k][0]=alphab[0]; xb[k++][1]=alphab[1]; }
         break;
      case(NSbeta): /* p_beta,q_beta */
         FOR(j,2) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; }
         break;
      case(NSbetaw):
         /* p0, p_beta, q_beta, w */
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];                    /* p0 */
         FOR(j,2) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p & q */
         if(!com.fix_omega) { xb[k][0]=1; xb[k++][1]=omegab[1]; }
         break;
      case(NSbetagamma): /* p0, p_beta, q_beta, alpha, beta */
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];                    /* p0 */
         FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, a&b */
         break;
      case(NSbeta1gamma): /* p0, p_beta, q_beta, alpha, beta */
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];                    /* p0 */
         FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, a&b */
         break;
      case(NSbeta1normal): /* p0, p_beta, q_beta, mu, s */
         xb[k][0]=pb[0]; xb[k++][1]=pb[1];                    /* p0 */
         FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, mu&s */
         xb[k-2][0]=1; xb[k-2][1]=9;                          /* mu */
         break;
      case(NS02normal): /* p0, p1, mu2, s1, s2 */
         FOR(j,2) { xb[k][0]=pb[0]; xb[k++][1]=pb[1]; }  /* p0 & p1, */
         FOR(j,3) { xb[k][0]=.0001; xb[k++][1]=29; }     /* mu2,s1,s2 */
         break;
      case(NS3normal): /* p0, p1, mu2, s0, s1, s2 */
         FOR(j,2) { xb[k][0]=-49; xb[k++][1]=49; }  /* p0 & p1, tranformed */
         FOR(j,4) { xb[k][0]=.0001; xb[k++][1]=29; } /* mu2,s0,s1,s2 */
         break;
      case(NSTgamma):
      case(NSTinvgamma):
      case(NSTgamma1):
      case(NSTinvgamma1):
         if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1) /* p0 for G(a,b,T) */
            { xb[k][0]=0.001; xb[k++][1]=0.9999; }
         /* alpha: inverse-gamma needs a > 1 for the mean to exist */
         xb[k][0]=0.05;
         if(com.NSsites==NSTinvgamma || com.NSsites==NSTinvgamma1)
            xb[k][0]=1.05;
         xb[k++][1]=alphab[1];                 /* alpha */
         xb[k][0]=0.05; xb[k++][1]=betab[1];   /* beta */
         if(!com.fix_omega)
            { xb[k][0]=1; xb[k++][1]=29; }     /* T */
         break;
      }
   }
   else if((com.seqtype==CODONseq||com.model==FromCodon) && com.aaDist!=AAClasses)
      { if(!com.fix_omega) { xb[k][0]=omegab[0]; xb[k][1]=omegab[1]; } }
   if(com.seqtype==CODONseq && com.model)
      for(j=0; j<com.nOmega-com.fix_omega; j++)
         { xb[k+j][0]=omegab[0]; xb[k+j][1]=omegab[1]; }
   if (com.aaDist<0 && (com.seqtype==1||com.model==FromCodon)) {
      /* linear relationship between d_ij and w_ij */
      if(com.nrate != !com.fix_kappa+1+(com.seqtype==1)) error2("in Setxbound");
      xb[com.ntime+com.nrgene+!com.fix_kappa][1]=1; /* 0<b<1 */
   }
   k=com.ntime+com.nrgene+com.nrate;
   for (i=0;i<com.nalpha;i++,k++) FOR (j,2) xb[k][j]=alphab[j];
   if (!com.fix_rho) FOR (j,2) xb[np-1][j]=rhob[j];

   if(noisy>=3 && np<100) {
      printf("\nBounds (np=%d):\n",np);
      for(i=0;i<np;i++) printf(" %10.6f", xb[i][0]); FPN(F0);
      for(i=0;i<np;i++) printf(" %10.6f", xb[i][1]); FPN(F0);
   }
   return(0);
}
void getpcodonClass(double x[], double pcodonClass[])
{
/* This uses pcodon0[], paa0[], and x[] to calculate pcodonclass[] and
com.pi[] for the fitness models.
pcodon0[] has the codon frequencies observed (codonFreq=3) or expected
(codonFreq=2 or 1 or 0) rootally. Under the fitness models, the expected
codon frequencies pcodonClass[] differs among site classes and from the
rootal pi[] (pcodon0[]).
This is called by SetParameters().
*/
int i,iclass,iaa, k, nclass=(com.NSsites==0?1:com.ncatG);
double paaClass[20], *w,fit;
if(com.seqtype!=1 || com.aaDist<FIT1) error2("getpcodonClass");
k=com.ntime+com.nrgene+!com.fix_kappa+nclass-1;
FOR(iclass, nclass) {
w=x+k+iclass*(4+(com.aaDist==FIT2));
FOR(iaa,20) {
fit = -w[0]*square(AAchem[0][iaa]-w[1])
-w[2]*square(AAchem[1][iaa]-w[3]);
paaClass[iaa]=exp(2*fit);
}
abyx(1/sum(paaClass,20), paaClass, 20);
FOR(i,com.ncode) {
iaa=GeneticCode[com.icode][FROM61[i]];
pcodonClass[iclass*64+i]=pcodon0[i]/paa0[iaa]*paaClass[iaa];
}
if(fabs(1-sum(pcodonClass+iclass*64,com.ncode))>1e-5) error2("pcodon!=1");
/*
fprintf(frst,"\nSite class %d: ",iclass+1);
matout (frst,paaClass,2, 10);
matout (frst,pcodonClass+iclass*64,16,4);
*/
}
if(nclass==1) FOR(i,com.ncode) com.pi[i]=pcodonClass[i];
}
int GetInitialsCodon (double x[])
{
/* This sets the initials and count com.np for codon models.
*/
int k=com.ntime+com.nrgene, i,j, K=com.ncatG, nsyncodon[20];
double mr=0;
if(com.nrate) { /* either kappa, omega, or both for each gene */
if(com.Mgene<=2) {
if(com.hkyREV) {
x[k++]=.5+rndu();
for(i=0; i<4; i++) x[k++]=.1+rndu();
}
else if (!com.fix_kappa)
x[k++] = com.kappa;
if(com.codonf==FMutSel0 || com.codonf==FMutSel) {
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[i]/(com.pf3x4[3]+.02*rndu());
if(com.npi>3 && com.codonf==FMutSel0) {
for(i=0; i<20; i++) nsyncodon[i]=0;
for(i=0; i<com.ncode; i++)
nsyncodon[GeneticCode[com.icode][FROM61[i]]] ++;
for(i=0; i<20-1; i++) /* amino acid fitness, ignoring nsyncodon */
x[k++] = log((com.piAA[i]/nsyncodon[i]+.001)/(com.piAA[19]/nsyncodon[19]+.002*rndu()));
}
else if(com.npi>3 && com.codonf==FMutSel) {
for(i=0;i<com.ncode-1;i++) /* codon fitness */
x[k++] = log((com.pi[i]+.001)/(com.pi[com.ncode-1]+.002*rndu()));
}
}
else if(com.npi) {
if(com.codonf==Fcodon)
for(i=0;i<com.ncode-1;i++) /* codon fitness */
x[k++] = log((com.pi[i]+.001)/(com.pi[com.ncode-1]+.002*rndu()));
else if(com.codonf==F1x4 || com.codonf==F1x4MG)
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[i]/(com.pf3x4[3]+.02*rndu());
else if(com.codonf==F3x4 || com.codonf==F3x4MG)
for(j=0; j<3; j++)
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[j*4+i]/(com.pf3x4[j*4+3]+.02*rndu());
}
if (com.NSsites==0 && com.model==0) {
if (!com.aaDist)
{ if(!com.fix_omega) x[k++]=com.omega; }
else if (com.aaDist==AAClasses)
for(i=0; i<com.nOmegaType; i++)
x[k++]=0.11+0.1*rndu();
else
{ x[k++]=0.11+0.1*rndu(); x[k++]=0.22+0.1*rndu(); }
}
}
else { /* com.Mgene==3,4 */
if(com.Mgene>=3) {
com.nrate *= com.ngene;
if(com.fix_omega) com.nrate--;
}
for(i=0; i<com.ngene; i++) {
if(com.hkyREV)
error2("hkyREV for ngene>1. Fix me.");
if(!com.fix_kappa && !com.fix_omega)
{ x[k++] = com.kappa; x[k++] = com.omega; }
else if (com.fix_kappa)
x[k++] = com.omega;
else if (com.fix_omega) {
x[k++] = com.kappa;
if(i!=com.ngene-1) x[k++] = com.omega;
}
}
}
}
if(com.model && com.model<=NSbranch3) { /* branch models */
if (com.model==NSbranchB) {
com.nbtype = tree.nbranch;
for(i=0; i<tree.nbranch; i++)
nodes[(int)tree.branches[i][1]].label = i;
}
if(com.NSsites==0) {
com.nOmega = com.nbtype;
if(com.aaDist==0)
com.nrate = com.nkappa+!com.fix_omega+com.nbtype-1;
else if (com.aaDist==AAClasses)
com.nrate = com.nkappa + com.nOmegaType*com.nbtype;
else if (com.model==NSbranchB || com.model==NSbranch2)
com.nrate += (com.model==NSbranchB ? tree.nbranch : com.nOmega-1+!com.fix_omega);
k = com.ntime+com.nrgene;
for(i=0; i<com.nrate; i++)
x[k++] = com.omega * (0.8+0.4*rndu());
}
}
if (com.NSsites==0 && com.nrate==0)
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, (com.nkappa>1?x+com.ntime+com.nrgene:&com.kappa), com.omega,PMat);
/* branch-site and clade models
com.nOmega=2 different w's at a site (three w's in the model: w0,w1,w2) */
if(com.model && com.NSsites) {
if(com.model==NSbranch2) { /* branch-site models A & B */
com.ncatG=4; K=3;
if(com.NSsites==NSdiscrete)
com.nrate = com.nkappa +com.npi + 2 +!com.fix_omega+com.nbtype-1-1; /* add w0 and w1 */
else
com.nrate = com.nkappa +com.npi +1+!com.fix_omega+com.nbtype-1-1;
}
/* add p0 and p1. check that this works for NSbranch2 */
k = com.ntime+com.nrgene+com.nkappa+com.npi;
if(com.model<=NSbranch2) { /* branch-site models A & B */
/* p0 and p1: x[0,1]=1,0, for p[]=0.6 0.2 */
x[k++] = 1+0.5*rndu();
if(K==3) x[k++] = 0.2*rndu();
if(com.NSsites == 2) /* w0<1, w1=1 (if present) */
x[k++] = 0.2+0.1*rndu();
else if(com.NSsites == NSdiscrete) { /* w0 and w1 for model B */
x[k++] = 0.2*rndu();
if(K==3) x[k++] = 0.4+.8*rndu();
}
if(!com.fix_omega)
x[k++] = com.omega + 1 + rndu(); /* w2 */
}
else { /* NSbranch3: clade models C and D */
x[k++] = 1 + rndu();
if(com.ncatG == 3) x[k++] = .5+rndu(); /* p0 and p1 */
if(com.NSsites == NSpselection) /* w0<1, w1=1 (if present) */
x[k++] = 0.2+0.2*rndu();
else if(com.NSsites == NSdiscrete) { /* w0 and w1 */
x[k++] = 0.2+0.2*rndu();
if(com.ncatG==3) x[k++] = 0.5+.5*rndu();
}
for(i=0; i<com.nbtype-1; i++) /* additional w's */
x[k++] = com.omega*(1+0.5*rndu());
if(!com.fix_omega)
x[k++] = com.omega*(1+0.5*rndu());
}
}
else if (com.NSsites) { /* w's are counted in com.nrate */
switch(com.NSsites) {
case(NSnneutral):
x[k++]=0.5+0.4*rndu(); /* p0 for w0<1 */
x[k++]=0.1+0.5*rndu(); /* w0<1 */
break;
case(NSpselection): /* for p0, p1. w is counted in nrate. */
case(NSM2aRel):
x[k++] = 0.8+rndu(); x[k++]=.1+.5*rndu(); /* p0, p1 */
x[k++] = 0.1+0.4*rndu(); /* w0<1 */
if(!com.fix_omega) {
x[k++] = com.omega*(1+0.2*rndu()); /* w2 */
if(com.omega<1 && com.NSsites==NSpselection) {
puts("\ninitial w for M2:NSpselection reset.");
x[k-1] = 2+rndu();
}
}
break;
case(NSdiscrete):
if(com.aaDist) {
for(i=0; i<com.ncatG-1; i++) x[k++]=0.;
if(com.aaDist<=6)
for(i=0;i<com.ncatG;i++) { x[k++]=1.1; x[k++]=1.2; }
for(i=0;i<com.ncatG;i++) /* ap,p*,av,v*, and b for each site class */
FOR(j,4+(com.aaDist==FIT2)) x[k++]=rndu();
}
else if(com.nparK) { /* K*(K-1) paras in HMM of dN/dS over sites */
zero(x+k,com.ncatG*(com.ncatG-1));
k += com.ncatG*(com.ncatG-1);
}
else { /* p0...pK. Note that w's are counted in nrate */
for(i=0;i<com.ncatG-1;i++) x[k++]=rndu();
for(i=0;i<com.ncatG;i++)
x[k++]=com.omega * (.5+i*2./com.ncatG*(0.8+0.4*rndu()));
}
break;
case(NSfreqs): /* p0...pK. w's are fixed */
for(i=0;i<com.ncatG-1;i++) x[k++]=(com.ncatG-j)/2.;
break;
case(NSgamma): x[k++]=1.1; x[k++]=1.1; break;
case(NS2gamma): /* p0, alpha1,beta1,alpha2=beta2 */
x[k++]=0.5; FOR(j,3) x[k++]=2*rndu()+j*0.1; break;
case(NSbeta): /* p_beta,q_beta */
x[k++]=.2+rndu(); x[k++]=1+rndu(); break;
case(NSbetaw):
/* p0, p_beta, q_beta. w is counted in nrate. */
x[k++]=.9; x[k++]=.2+rndu(); x[k++]=1+rndu();
if(!com.fix_omega) {
x[k++]=com.omega;
if(com.omega<1) {
puts("\ninitial w for M8:NSbetaw>1 reset.");
x[k-1]=2+rndu();
}
}
break;
case(NSbetagamma): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.9; x[k++]=.4; x[k++]=1.2; x[k++]=1.1; x[k++]=1.1;
break;
case(NSbeta1gamma): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.9; x[k++]=.4; x[k++]=1.2; x[k++]=.1; x[k++]=1.1;
break;
case(NSbeta1normal): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.95; x[k++]=.4; x[k++]=1.2; x[k++]=1.1; x[k++]=1.1;
break;
case(NS02normal): /* p0, p1, mu2, s1, s2 */
x[k++]=.8; x[k++]=0.3; /* p0 & p1, not transformed */
x[k++]=.2; /* mu2 */
x[k++]=5; x[k++]=1.1; /* s1,s2 */
break;
case(NS3normal): /* p0, p1, mu2, s0, s1, s2 */
x[k++]=.77; x[k++]=0.22; /* p0 & p1, transformed */
x[k++]=.2; /* mu2 */
x[k++]=0.5; x[k++]=5; x[k++]=1.1; /* s0,s1,s2 */
break;
case(NSTgamma): /* alpha, beta, T */
case(NSTgamma1): /* p0, alpha, beta, T */
if(com.NSsites==NSTgamma1)
x[k++]=0.8+0.2*rndu(); /* p0, not transformed */
x[k++]=2+rndu(); x[k++]=3+rndu();
if(!com.fix_omega) x[k++]=1.+rndu();
break;
case(NSTinvgamma): /* alpha, beta, T */
case(NSTinvgamma1): /* p0, alpha, beta, T */
if(com.NSsites==NSTinvgamma1)
x[k++]=0.8+0.2*rndu(); /* p0, not transformed */
x[k++]=3+rndu(); x[k++]=0.8+0.2*rndu(); /* mean = b/(a-1) */
if(!com.fix_omega) x[k++]=1.+rndu();
break;
}
} /* if(com.NSsites) */
com.np = k;
return(0);
}
int GetInitials (double x[], int* fromfile)
{
/* This calculates the number of parameters (com.np) and gets initial values
   into x[].  *fromfile is set to nonzero if initials were read via readx().
   This routine is too messy.  Perhaps try to restructure the code and make
   two sections for amino acids and codons?
   com.nrate is initialised in getoptions().
*/
   static int times=0;   /* so that GetDaa() is called only on the first call */
   int i, j,k=0, naa=20;
   /* NOTE(review): K is computed here but not used below in this routine. */
   int K=(com.model==2&&com.NSsites?com.ncatG-1:com.ncatG);
   /* size in bytes of the conditional-probability arrays for internal nodes */
   size_t sconP_new = (size_t)(tree.nnode-com.ns)*com.ncode*com.npatt*sizeof(double);
   double t;

   NFunCall = NPMatUVRoot = NEigenQ = 0;
   if(com.clock==ClockCombined && com.ngene<=1)
      error2("Combined clock model requires mutliple genes.");
   GetInitialsTimes(x);

   /* choose the likelihood routine: lfun (no rate variation),
      lfundG (discrete-gamma / site classes), lfunAdG (auto-discrete-gamma/HMM) */
   com.plfun = (com.alpha==0 ? lfun : (com.rho==0?lfundG:lfunAdG));
   if(com.NSsites) com.plfun=lfundG;
   if(com.nparK) com.plfun=lfunAdG;
   if(com.plfun==lfun) com.conPSiteClass=0;

   if(com.method && com.fix_blength!=2 && com.plfun==lfundG) {
      /* iteration method needs conditional probabilities kept per site class */
      com.conPSiteClass=1;
      sconP_new *= com.ncatG;
   }
   /* NOTE(review): sconP_new is size_t (unsigned), so sconP_new<0 is always
      false; this overflow guard is ineffective as written — confirm intent. */
   if(com.sconP<0 || sconP_new<0) error2("data set too large.");
   if(com.sconP<sconP_new) {
      /* grow the conditional-probability buffer */
      com.sconP = sconP_new;
      printf("\n%9lu bytes for conP, adjusted\n", com.sconP);
      if((com.conP=(double*)realloc(com.conP, com.sconP))==NULL)
         error2("oom conP");
   }
   InitializeNodeScale();

   if(times++==0) {
      /* read the amino acid distance/rate matrix once, if the model needs it */
      if((com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses &&
         (com.seqtype==CODONseq||com.model==FromCodon)) ||
         (com.seqtype==AAseq &&
         (com.model==Empirical||com.model==Empirical_F||com.model>=REVaa_0))){
         GetDaa(NULL,com.daa);
      }
   }
   com.nrgene = (!com.fix_rgene)*(com.ngene-1);
   for(j=0; j<com.nrgene; j++) x[com.ntime+j] = 1;

   if(com.seqtype==CODONseq)
      GetInitialsCodon(x);   /* also sets com.np */
   else {
      com.np = com.ntime+com.nrgene+com.nrate;
      k=com.ntime+com.nrgene;
      if (com.aaDist==AAClasses) {
         if (!com.fix_kappa) x[k++]=com.kappa;
         for(i=0; i<com.nrate-!com.fix_kappa; i++)
            x[k++] = com.omega;
         if (com.nOmegaType>65)
            puts("\a\nget better initial values for AAclasses?");
      }
      else {
         if (com.seqtype==AAseq) {                /* AAseq */
            if (com.nrate==0)
               EigenQaa(NULL, Root, U, V, &t);    /* once for all */
            if (com.model==REVaa_0) {
               /* exchangeabilities for 1-step amino acid changes only */
               for(i=0;i<naa;i++) for(j=0;j<i;j++)
                  if (AA1STEP[i*(i-1)/2+j] && i*naa+j!=ijAAref)
                     x[k++] = com.daa[i*naa+j];
            }
            else if (com.model==REVaa) {
               /* full REV amino acid model, reference rate excluded */
               for (i=1; i<naa; i++) for(j=0; j<i; j++)
                  if(i*naa+j != ijAAref) x[k++] = com.daa[i*naa+j];
            }
            else if (com.model==FromCodon) {
               for(j=0; j<com.nkappa; j++) x[k++] = com.kappa;
               for(j=0; j<com.nrate-com.nkappa; j++) x[k++] = com.omega;
            }
         }
      }
   }

   for (i=0; i<com.nalpha; i++) x[com.np++] = com.alpha;
   if (!com.fix_rho) x[com.np++] = com.rho;

   if (com.rho)
      AutodGamma (com.MK, com.freqK, com.rK, &t, com.alpha, com.rho,com.ncatG);
   else if (com.alpha && com.fix_alpha && !com.NSsites)
      DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);

   if(com.fix_blength==-1)
      /* random initials: small branch lengths, moderate other parameters */
      for(i=0; i<com.np; i++) x[i] = (i<com.ntime ? .1+0.5*rndu() : 0.5+rndu());

   *fromfile=0;
   if(finitials) {
      readx(x,fromfile);
      if(com.runmode>0 && fromfile && com.NSsites) LASTROUND=1;
   }
   return (0);
}
int SetPGene (int igene, int _pi, int _UVRoot, int _alpha, double x[])
{
/* Sets up pi, the eigen-system (U, V, Root), and/or the gamma rates for
   gene/partition igene, according to the flags _pi, _UVRoot, _alpha.
   xcom[] does not contain time parameters.
   Note that com.piG[][] have been homogeneized if (com.Mgene==3)
   Note calculation of nr1 for (com.Mgene>=3 && com.fix_omega), as only the
   w for the last partition is fixed.
*/
   /* nr1 = number of rate parameters per gene; k indexes this gene's rates */
   int nr1=(com.nrate+1)/com.ngene, k=com.nrgene+(com.Mgene>=3)*igene*nr1;
   double *xcom=x+com.ntime, mr=0;

   if (_pi) {
      xtoy (com.piG[igene],com.pi,com.ncode);
#if(defined(CODEML))
      if(com.codonf==F1x4MG || com.codonf==F3x4MG)
         com.pf3x4 = com.f3x4[igene];
#endif
   }
   if (_UVRoot) {
      if (com.seqtype==CODONseq) {
         /* the k++ order here must match the layout set in GetInitialsCodon() */
         if(!com.fix_kappa) com.kappa=xcom[k++];
         if(!com.fix_omega) com.omega=xcom[k++];
         else
            /* only the last gene's w is fixed when Mgene>2 */
            com.omega = (com.Mgene>2&&igene<com.ngene-1?xcom[k++]:com.omega_fix);
         if (!com.NSsites)
            EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr,
               (com.hkyREV||com.codonf==FMutSel?&xcom[com.nrgene]:&com.kappa),com.omega,PMat);
      }
      else
         EigenQaa(NULL, Root, U, V, xcom+k);
   }
   if (_alpha) {
      /* per-gene gamma shape parameter */
      com.alpha=xcom[com.nrgene+com.nrate+igene];
      DiscreteGamma (com.freqK, com.rK, com.alpha, com.alpha, com.ncatG, DGammaUseMedian);
   }
   return (0);
}
int SetParametersNSsites (double x[])
{
/* Sets com.freqK[], com.rK[] (site-class proportions and omegas) and the
   scale factors/eigen-systems for NSsites and NSbranchsite models
   including HMM, NSclade models.  p's are before w's in x[].
   w[2][3] holds omegas; w[i][j] is the omega for site class j on the
   background (i=0) or foreground (i=1) branches (see the scaling loop
   below, where i=0 uses 2 site classes and i=1 uses 3).
    A & B: branch-site models: (model=2, NSsites=2 or 3)
                  iclass
              0    1    2    3
       back   w0   w1   w0   w1
       fore   w0   w1   w2   w2
    C & D: clade-site models: (model=3, NSsites=2 or 3)
       (D: nbtype = 2)
                  iclass
              0    1    2
       b0     w0   w1   w2
       b1     w0   w1   w3
       b2     w0   w1   w4
       ......
*/
   int k0=com.ntime+com.nrgene+com.nkappa+com.npi, k=k0;
   int K=com.ncatG, i,j, off;
   double w[NBTYPE][3], t, S,dS,dN, spaceP2PI[NCATG*(NCATG+1)], small=1e-4;
   double mr, f;
   double p0=1, c,e,eT, dwy, y,z, a, b, T, C, lnGa, ww, sign, *xI=NULL, *wI=NULL; /* truncated NSsites models */

   if(com.NSsites==0) error2("SetParametersNSsites : strange.");

   switch(com.NSsites) {
   case(NSnneutral):
      /* M1a: two classes, w0<1 and w1=1 */
      com.freqK[0] = x[k++];
      com.freqK[1] = 1-com.freqK[0];
      com.rK[0] = x[k++];
      com.rK[1] = 1;
      break;

   case(NSpselection):
   case(NSM2aRel):
   case(NSdiscrete):
      if(com.model == NSbranch2)  /* branch-site A&B (Y&N2002) */
         K = com.ncatG-1;
      if(com.nparK) { /* HMM models, setting up p[] & w[] */
         for(j=0; j<K; j++)  /* w's for site classes */
            com.rK[j] = x[k++];
         for (i=0; i<K; i++, k+=K-1) {  /* rows of the transition matrix */
            if (!LASTROUND) f_and_x(x+k,com.MK+i*K,K,0,0);  /* x->f */
            else xtoy (x+k,com.MK+i*K,K-1);
            com.MK[i*K+K-1] = 1-sum(com.MK+i*K,K-1);
         }
         PtoPi(com.MK, com.freqK, K, spaceP2PI);  /* stationary freqs of MK */
         break;
      }

      /* *** Note: Falling through.
             This sets up p[] for NSpselection, NSdiscrete, NSfreqs
      */
   case(NSfreqs):
      if (!LASTROUND) {
         f_and_x(x+k,com.freqK,K,0,1);   /* x->f: back-transform proportions */
         k += K-1;
      }
      else {  /* in the last round, proportions are stored untransformed */
         for(j=0,com.freqK[K-1]=1; j<K-1; j++)
            com.freqK[K-1] -= (com.freqK[j] = x[k++]);
         if(com.freqK[K-1]<-small || com.freqK[K-1]>1+small) {
            matout(F0, com.freqK, 1, K);
            error2("freqK[]");
         }
      }

      /* setting up w[] */
      if(com.NSsites == NSfreqs) {  /* fixed omegas, only freqs estimated */
         if(com.ncatG!=5) error2("NSfreqs, ncatG?");
         com.rK[0] = 0;
         com.rK[1] = 1./3;
         com.rK[2] = 2./3;
         com.rK[3] = 1;
         com.rK[4] = 3;
      }
      else if(com.NSsites == NSpselection || com.NSsites == NSM2aRel) {
         com.rK[0] = x[k++];
         com.rK[1] = 1;
         com.rK[2] = (com.fix_omega && com.model<=2 ? com.omega_fix : x[k++]);
      }
      else if(com.NSsites == NSdiscrete && com.aaDist == 0) {
         for(j=0; j<K; j++)
            com.rK[j] = x[k++];
      }

      if(com.model) {  /* branch-site and clade models */
         if(com.model == NSbranch2) { /* branch-site models */
            w[0][0] = w[1][0] = com.rK[0];  /* site class 0: both get w0 */
            w[0][1] = w[1][1] = com.rK[1];  /* site class 1: both get w1 */
            w[0][2] = -1;                   /* background never uses class 2 */
            w[1][2] = com.rK[2];            /* foreground w2 */
         }
         else {  /* clade models: each branch type has its own last omega */
            k--;
            for(i=0; i<com.nbtype; i++) {
               for(j=0; j<K-1; j++)
                  w[i][j] = com.rK[j];
               w[i][K-1] = (i==com.nbtype-1 && com.fix_omega ? com.omega_fix : x[k++]);
            }
         }
      }
      break;

   case(NSgamma):
   case(NS2gamma):
   case(NSbeta):
   case(NSbetaw):
   case(NSbetagamma):
   case(NSbeta1gamma):
   case(NSbeta1normal):
   case(NS02normal):
   case(NS3normal):
      /* continuous w distributions: discretize into com.ncatG classes */
      DiscreteNSsites(x+k);
      break;
   }

   /* rK[] & freqK[] for truncated nssites models. */
   if(com.NSsites>=NSTgamma && com.NSsites<=NSTinvgamma1) {
      off = (com.NSsites==NSTgamma1||com.NSsites==NSTinvgamma1);
      if(off) {  /* extra class with w=1 and proportion 1-p0 */
         K = com.ncatG-1;
         p0 = x[k];
         com.rK[K] = 1;
         com.freqK[K] = 1 - p0;
      }
      a = x[k+off];
      b = x[k+off+1];
      T = (com.fix_omega ? com.omega_fix : x[k+off+2]);  /* truncation point */
      K = com.ncatG-off;
      lnGa = LnGamma(a);
      if(com.NSsites==NSTgamma || com.NSsites==NSTgamma1) {
         C = CDFGamma(T, a, b);               /* normalizing constant */
         mr = a/(C*b)*CDFGamma(T, a+1, b);    /* mean rate of truncated gamma */
      }
      else {
         C = 1 - CDFGamma(1/T, a, b);
         mr = b/(C*(a-1))*( 1 - CDFGamma(1/T, a-1, b) );
      }
      GaussLegendreRule(&xI, &wI, K);

      /* w changes monotonically from 0 to T. */
      for(j=0; j<K; j++) {
         /* map the symmetric Gauss-Legendre points onto increasing w */
         if(j<K/2) { i = K/2-1-j; sign=-1; }
         else      { i = j-K/2;   sign=1; }
#if(0)  /* linear transform */
         y = sign*xI[i];
         com.rK[j] = ww = (1+y)*T/2;
         dwy = T/2;
#else   /* exponential transform */
         c = 1;
         eT = exp(-c*T);
         y = -sign*xI[i];
         z = 1 + eT + y - y*eT;
         com.rK[j] = ww = -1/c*log(z/2);
         dwy = (1 - eT)/(c*z);
#endif
         /* class weight = density of truncated (inv)gamma * quadrature weight */
         if(com.NSsites==NSTgamma || com.NSsites==NSTgamma1)
            com.freqK[j] = exp( a*log(b*ww)-lnGa-b*ww )/(ww*C) * p0*wI[i]*dwy;
         else
            com.freqK[j] = exp( a*log(b/ww)-lnGa-b/ww ) /(ww*C) * p0*wI[i]*dwy;
      }
      /*
      printf("\na b T lnGa=%9.5f%9.5f%9.5f %9.5f\nf & w:\n", a,b,T, lnGa);
      FOR(j,com.ncatG) printf("%13.5f", com.freqK[j]);  FPN(F0);
      FOR(j,com.ncatG) printf("%13.5f", com.rK[j]);  FPN(F0);
      */
   }

   /* For NSsites models, calculates Qfactor_NS, to be used in EigenQcodon().
      For branch-site and clade models, calculate Qfactor_NS[] and also
      UVRoot for different omega's.
   */
   k = k0;
   if(com.model == 0) { /* NSsites models */
      if(com.aaDist==0) {
         if(com.NSsites<NSTgamma || com.NSsites>NSTinvgamma1)  /* mr already calculated for truncated models */
            for(j=0,mr=0; j<com.ncatG; j++)
               mr += com.freqK[j]*com.rK[j];
         Qfactor_NS = -1;
         EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &Qfactor_NS, com.pkappa, mr, PMat);
      }
      else {
         /* fitness models: each site class has its own pi and Q scale */
         for(j=0,Qfactor_NS=0; j<com.ncatG; j++) {
            if(com.aaDist<10)
               com.pomega = x+k+com.ncatG-1+2*j;
            else if(com.aaDist >= FIT1) {
               com.pomega = x+k+com.ncatG-1+j*(4+(com.aaDist==FIT2));
               xtoy(pcodonClass+j*64, com.pi, com.ncode);
            }
            mr = -1;
            EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &mr, com.pkappa, com.rK[j], PMat);
            Qfactor_NS += com.freqK[j]*mr;
         }
      }
      Qfactor_NS = 1/Qfactor_NS;
      if(NFunCall==1) printf("Qfactor_NS = %.6f\n", Qfactor_NS);
   }
   else if (com.model == NSbranch2) { /* branch&site models */
      t = com.freqK[0] + com.freqK[1];
      if(t<1e-100)
         error2("p0 + p1 too small for branch&site model?");
      /* classes 2 & 3 split 1-t in the same proportions as classes 0 & 1 */
      com.freqK[2] = (1-t)*com.freqK[0]/t;
      com.freqK[3] = (1-t)*com.freqK[1]/t;

      /* calculates scale factors: background branches has two site classes
         while foreground branches has 3 site classes */
      for(i=0; i<2; i++) {  /* i=0 back (2 site classes); i=1 fore (3 classes) */
         for(j=0,mr=0; j<(i==0?2:3); j++) {
            com.omega = w[i][j];
            f = com.freqK[j];
            if(i==0)      f = com.freqK[j]/t;
            else if(j==2) f = 1-t;
            if(NFunCall==1) printf("branch=%d freq=%.6f w%d = %.6f\n", i,f,j,com.omega);
            mr += f*com.omega;
         }
         Qfactor_NS_branch[i] = -1;
         EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &Qfactor_NS_branch[i], com.pkappa, mr, PMat);
         Qfactor_NS_branch[i] = 1/Qfactor_NS_branch[i];
         if(NFunCall==1) printf("\t\t\tQfactor for branch %d = %.6f\n", i,Qfactor_NS_branch[i]);
      }

      /* calculates 3 sets of U&V&Root vectors (w0,w1,w2), for GetPMatBranch().
         No EigenQcodon() calls are needed in ConditionalPNode() or minbranches().
      */
      for(i=0; i<3; i++) {  /* (w0,w1,w2) */
         if(NFunCall==1) printf("w[%d] = %.6f\n", i, w[1][i]);
         mr = 1;
         EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,w[1][i],PMat);
      }
   }
   else {  /* NSbranch3: Clade models C and D */
      /* calculates Qfactor_NS_branch[nbtype]: each branch has K=com.ncatG site classes */
      for(i=0; i<com.nbtype; i++) {
         for(j=0,mr=0; j<K; j++)
            mr += com.freqK[j] * w[i][j];
         Qfactor_NS_branch[i] = -1;
         EigenQcodon(0,-1,NULL,NULL,NULL,NULL,NULL,NULL, &Qfactor_NS_branch[i], com.pkappa,mr,PMat);
         Qfactor_NS_branch[i] = 1/Qfactor_NS_branch[i];
         if(NFunCall==1) printf("\t\t\tQfactor for branch=%d = %.6f\n", i,Qfactor_NS_branch[i]);
      }

      /* calculates K-1+nbtype sets of U&V&Root vectors (w0,w1,w2, w3,...), for GetPMatBranch().
      */
      for(i=0; i<K-1+com.nbtype; i++) {
         mr = 1;
         /* shared classes first (w0..w_{K-2}), then one omega per branch type */
         com.omega = (i < K-1 ? w[0][i] : w[i-K+1][K-1]);
         EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,com.omega,PMat);
      }
   }
   return(0);
}
int Set_UVR_BranchSite (int iclass, int branchlabel)
{
/* Points the globals U, V, Root at the precomputed eigen-system matching
   the omega used by site class iclass on branches labelled branchlabel.
   Branch-site models A & B keep 3 sets (w0, w1, w2); clade models C & D
   keep com.ncatG-1+nbtype sets.  Returns the index of the chosen set.
*/
   int set;

   if (com.model==0 || com.NSsites==0) error2("should not be here.");
   if (com.model > NSbranch2) {
      /* clade models C & D: shared classes first, then one per branch label */
      set = (iclass < com.ncatG-1 ? iclass : com.ncatG-1 + branchlabel);
   }
   else {
      /* branch-site models A & B */
      if (branchlabel)
         set = (iclass<=1 ? iclass : 2);  /* foreground: w0 w1 w2 */
      else
         set = iclass % 2;                /* background: w0 w1 */
   }
   U = _UU[set];
   V = _VV[set];
   Root = _Root[set];
   return (set);
}
int GetCodonFreqs (void)
{
/* This is called by SetParameters() and calculates the expected base or
   codon frequencies (com.pf3x4[] & com.pi[]) using the parameters under
   the model com.codonf.  This is used for models in which codon frequency
   parameters are estimated from the data by ML.  Modified from
   GetCodonFreqs2().  com.pi[] is modified (renormalized to sum to 1 on
   exit); com.piAA[] is filled for FMutSel0 with npi>3.
   The routine does not work if com.ngene>1.  Always returns 0.
   Fix: removed a dead "y = sum(com.pi, n)" store and the unused local k.
*/
   int n=com.ncode, i,j, ic,iaa,b[3];
   double *ppi=com.ppi, mutbias[20], y;

   if (com.codonf==Fcodon) {
      /* com.ppi[] holds log fitness of each codon relative to the last */
      for(i=0; i<n; i++)
         com.pi[i] = (i==n-1 ? 1 : exp(com.ppi[i]));
      abyx (1./sum(com.pi,n), com.pi, n);
      return(0);
   }

   /* base frequencies at the three codon positions; F1x4-style models reuse
      the same three free parameters for every position (ppi not advanced) */
   for(j=0;j<3;j++) {
      xtoy(ppi, com.pf3x4+j*4, 3);
      com.pf3x4[j*4+3] = 1;
      abyx (1./sum(com.pf3x4+j*4,4), com.pf3x4+j*4, 4);
      if(com.codonf==F3x4 || com.codonf==F3x4MG)
         ppi += 3;
   }
   if(com.codonf==FMutSel && com.npi==3) return(0);

   if ((com.codonf>=F1x4 && com.codonf<=F3x4MG) || com.npi>3) {
      /* pi_codon as product of position-specific base frequencies */
      for (i=0; i<n; i++) {
         ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
         com.pi[i] = com.pf3x4[b[0]]*com.pf3x4[4+b[1]]*com.pf3x4[8+b[2]];
      }
   }
   if (com.codonf==FMutSel && com.npi>3) {
      for(i=0; i<n-1; i++)  /* last codon has fitness 0 */
         com.pi[i] *= exp(com.ppi[3+i]);
   }
   else if (com.codonf==FMutSel0 && com.npi>3) {
      for(i=0; i<n; i++) {  /* last amino acid has fitness 0 */
         iaa = GeneticCode[com.icode][FROM61[i]];
         if(iaa<19) com.pi[i] *= exp(com.ppi[3+iaa]);
      }
      /* collapse codon frequencies into amino acid frequencies */
      for(i=0,zero(com.piAA,20); i<n; i++)
         com.piAA[GeneticCode[com.icode][FROM61[i]]] += com.pi[i];
      abyx (1./sum(com.piAA,20), com.piAA, 20);
   }
   else if (com.codonf==FMutSel0 && com.npi==3) {
      /* distribute the amino acid frequencies com.piAA[] among synonymous
         codons in proportion to the mutational bias of each codon */
      for (i=0,zero(mutbias,20); i<n; i++) {
         ic=FROM61[i]; iaa = GeneticCode[com.icode][ic];
         b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
         mutbias[iaa] += com.pf3x4[b[0]]*com.pf3x4[b[1]]*com.pf3x4[b[2]];
      }
      for(i=0; i<n; i++) {
         ic=FROM61[i]; iaa = GeneticCode[com.icode][ic];
         b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
         y = com.pf3x4[b[0]]*com.pf3x4[b[1]]*com.pf3x4[b[2]];
         com.pi[i] = y/mutbias[iaa] * com.piAA[iaa];
      }
   }
   abyx (1./sum(com.pi,n), com.pi, n);
   return (0);
}
int SetParameters (double x[])
{
/* Set com. variables and initialize U, V, Root etc. before each calculation
   of the likelihood function.
   Is it a good idea to restruct this and/or Getinitials into two parts,
   one for aa's and another for codons?
   When (com.NSsites==NS02normal || NS3normal), p's are before w's in x[];
   see CDFdN_dS().
   Always returns 0; returns early for clock>=5 or when x[] holds only times.
*/
   int i,j,k, ik=0, nUVR=NBTYPE+2;
   double t,w0=-1, mr=0;

   if(com.clock>=5) return(0);
   if(com.fix_blength<2) SetBranch(x);
   if(com.np<=com.ntime) return(0);

   if(com.seqtype==1 || com.model==FromCodon || com.aaDist==AAClasses) {
      k = com.ntime+com.nrgene;
      if(com.hkyREV==0) {
         /* fixed kappa is stored as pkappa[0]; estimated kappa read from x[] */
         if(com.fix_kappa==1) { com.pkappa[0]=com.kappa; ik=1; }
         else                   com.kappa=x[k];
      }
      for(i=0; i<com.nkappa; i++)
         com.pkappa[ik++] = x[k++];
      if(com.npi) {
         /* codon frequency parameters follow kappa in x[] */
         com.ppi = x+com.ntime+com.nrgene+com.nkappa;
         GetCodonFreqs ();
      }
      com.pomega = x+com.ntime+com.nrgene+com.nkappa+com.npi;
   }
   for(j=0;j<com.nrgene;j++)
      com.rgene[j+1] = x[com.ntime+j];
   if(com.clock && AbsoluteRate) com.rgene[0] = x[0];  /* so that rgene are abs rates */

   if(com.seqtype==1 && com.aaDist>=FIT1)
      getpcodonClass(x, pcodonClass);

   k=com.ntime+com.nrgene+com.nkappa+com.npi;
   if (com.nrate) {
      if(!com.model && !com.aaDist && !com.fix_omega && !com.NSsites)
         com.omega=x[k];
      if(com.seqtype==AAseq)
         EigenQaa(NULL, Root, U, V, x+com.ntime+com.nrgene);
      else if(com.model==0 && com.NSsites==0 && com.Mgene<=1)
         /* one shared eigen-system for the whole data set */
         EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa, com.omega,PMat);
      else if((com.model==NSbranchB || com.model==NSbranch2)
         && com.NSsites==0 && com.nbtype<=nUVR) {
         /* one eigen-system per branch type */
         for(i=0; i<com.nbtype; i++) {
            if(com.aaDist == AAClasses)
               com.pomega = x+com.ntime+com.nrgene+com.nkappa+com.npi+i*com.nOmegaType;
            else
               w0 = (i==com.nOmega-1&&com.fix_omega?com.omega_fix:com.pomega[i]);
            EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,w0,PMat);
         }
      }
      k = com.ntime+com.nrgene+com.nrate;
   }
   if (com.seqtype==CODONseq && com.NSsites)
      SetParametersNSsites(x);

   /* to force crash in case or error
   if(com.model) com.omega=-1;
   */

   /* branch models: store each branch's omega on its node */
   if(com.seqtype==CODONseq && com.model && com.NSsites==0 && com.aaDist==0) {
      FOR(j,tree.nnode) {
         if (j==tree.root) continue;
         if (com.fix_omega && (int)nodes[j].label==com.nOmega-1)
            nodes[j].omega = com.omega_fix;
         else
            nodes[j].omega = com.pomega[(int)nodes[j].label];
      }
   }
   if (!com.fix_alpha && com.NSsites==0) {
      com.alpha = x[k++];
      if (com.fix_rho)
         DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);
   }
   if (!com.fix_rho) {
      com.rho=x[k++];
      AutodGamma(com.MK, com.freqK, com.rK, &t, com.alpha, com.rho, com.ncatG);
   }
   return (0);
}
int DiscreteNSsites(double par[])
{
/* This discretizes the continuous distribution for dN/dS ratios among sites
   and calculates freqK[] and rK[], using the median method.
   par[] contains all paras in the w distribution. par[0] is the
   proportion of beta if (com.NSsites==betaw), or the proportion of w=0 if
   (com.NSsites=NS02normal).
   This routine uses com.NSsites, com.ncatG, com.freqK, com.rK.
   betaw has com.ncatG-1 site classes in the beta distribution, and 02normal
   has com.ncatG-1 site classes in the mixed normal distribution.
   See the function CDFdN_dS() for definitions of parameters.
   Always returns 0 (ordering problems are only warned about on stdout).
*/
   /* NOTE(review): i is declared but unused in this routine. */
   int status=0, i,j,off, K=com.ncatG-(com.NSsites==NSbetaw || com.NSsites==NS02normal);
   double xb[2]={1e-7,99};  /* bounds for omega. */
   double p01=0, p, w0, lnbeta;

   if(com.NSsites==NSbeta || com.NSsites==NSbetaw) xb[1]=1;

   if(com.NSsites==NSbeta || com.NSsites==NSbetaw) {
      /* beta quantiles can be computed directly via QuantileBeta */
      off = (com.NSsites==NSbetaw);  /* par[0] is proportion for beta for M8 */
      lnbeta = LnGamma(par[off])+LnGamma(par[off+1])-LnGamma(par[off]+par[off+1]);
      for(j=0; j<K; j++) {
         p = (j*2.+1)/(2.*K);  /* median of the j-th equal-probability bin */
         com.rK[j] = QuantileBeta(p, par[off], par[off+1], lnbeta);
      }
   }
   else {
      /* generic distributions: invert the CDF numerically */
      for(j=0; j<K; j++) {
         p = (j*2. + 1)/(2.*K);
         /* w0 is only a starting guess for the root search; note j/K is
            integer division (0 for j<K), so w0 is 0.01 or an average with
            the previous rK[j] — possibly (double)j/K was intended */
         w0 = 0.01 + j/K;
         if(com.rK[j]) w0 = (w0 + com.rK[j])/2;
         com.rK[j] = Quantile(CDFdN_dS, p, w0, par, xb);  /* median */
      }
   }
   for(j=0; j<K; j++) com.freqK[j] = 1.0/K;

   if(com.NSsites==NSbetaw) {
      /* append the extra class with w from par[3] (or fixed) and weight 1-p0 */
      if(!com.fix_omega) com.rK[com.ncatG-1] = par[3];
      else               com.rK[com.ncatG-1] = com.omega_fix;
      com.freqK[K] = 1-par[0];
      for(j=0; j<K; j++) com.freqK[j] *= par[0];
   }
   if(com.NSsites==NS02normal) {
      for(j=K-1;j>=0;j--)  /* shift to right by 1 to make room for spike at 0*/
         { com.rK[j+1]=com.rK[j]; com.freqK[j+1]=com.freqK[j]; }
      com.rK[0]=0;  com.freqK[0]=par[0];
      for(j=1;j<K+1;j++) com.freqK[j]*=(1-par[0]);
   }

   if(com.NSsites>=NSgamma){
      /* sanity check: quantiles of the beta must be non-decreasing */
      if(!status && com.NSsites==NSbeta)
         for(j=1;j<com.ncatG;j++) if(com.rK[j]+1e-7<com.rK[j-1]) status=1;
      if(status) {
         printf("\nwarning: DiscreteNSsites\nparameters: ");
         FOR(j,(com.NSsites==7?2:4)) printf(" %12.6f", par[j]);  FPN(F0);
         FOR(j,com.ncatG) printf("%13.5f", com.freqK[j]);  FPN(F0);
         FOR(j,com.ncatG) printf("%13.5e", com.rK[j]);  FPN(F0);
      }
   }
   return(0);
}
double CDFdN_dS(double x,double p[])
{
/* This calculates the CDF of the continuous dN/dS distribution over sites,
to be used as argument to the routine Quantile(). When the distribution
has spikes, the spikes are ignored in this routine, and the scaling
is done outside this routine, for example, in DiscreteNSsites().
All parameters (par) for the w distribution are passed to this routine,
although some (p0 for the spike at 0) are not used in this routine.
Parameters are arranged in the following order:
NSgamma (2): alpha, beta
NS2gamma (4): p0, alpha1, beta1, alpha2 (=beta2)
NSbeta (2): p_beta, q_beta
NSbetaw (4): p0, p_beta, q_beta, w (if !com.fix_omega, not used here)
NSbetagamma (5): p0, p_beta, q_beta, alpha, beta
NSbeta1gamma (5): p0, p_beta, q_beta, alpha, beta (1+gamma)
NSbeta1normal (5): p0, p_beta, q_beta, mu, s (normal>1)
NS02normal (5): p0, p1, mu2, s1, s2 (s are sigma's)
NS3normal (6): p0, p1, mu2, s0, s1, s2 (s are sigma's)
Parameters p0 & p1 are transformed if (!LASTROUND)
Returns the CDF at x, or -1 if com.NSsites matches none of the cases below.
*/
double cdf=-1;  /* sentinel: stays -1 for an unhandled com.NSsites value */
double z, f[3],mu[3]={0,1,2},sig[3]; /* 3normal: mu0=0 fixed. mu2 estimated */
switch(com.NSsites) {
case(NSgamma): cdf=CDFGamma(x,p[0],p[1]); break;
case(NS2gamma): /* mixture of two gammas; second has alpha2=beta2=p[3] (see list above) */
cdf=p[0] *CDFGamma(x,p[1],p[2])+(1-p[0])*CDFGamma(x,p[3],p[3]); break;
case(NSbeta): cdf=CDFBeta(x,p[0],p[1],0); break;
case(NSbetaw): cdf=CDFBeta(x,p[1],p[2],0); break; /* p[0] (mixing proportion) is handled by the caller */
case(NSbetagamma):
cdf=p[0]*CDFBeta(x,p[1],p[2],0)+(1-p[0])*CDFGamma(x,p[3],p[4]); break;
case(NSbeta1gamma): /* beta on (0,1) mixed with gamma shifted to (1,inf) */
if(x<=1) cdf=p[0]*CDFBeta(x,p[1],p[2],0);
else cdf=p[0]+(1-p[0])*CDFGamma(x-1,p[3],p[4]);
break;
case(NSbeta1normal): /* beta on (0,1) mixed with a normal truncated to (1,inf) */
if(x<=1) cdf=p[0]*CDFBeta(x,p[1],p[2],0);
else {
cdf=CDFNormal((p[3]-1)/p[4]); /* mass of the normal above 1, used to renormalize */
if(cdf<1e-9) { /* essentially no normal mass above 1: report and pause */
matout(F0,p,1,5);;
printf("PHI(%.6f)=%.6f\n",(p[3]-1)/p[4],cdf); getchar();
}
cdf=p[0]+(1-p[0])*(1- CDFNormal((p[3]-x)/p[4])/cdf);
}
break;
case(NS02normal): /* two truncated normals; the spike at 0 (p[0]) is ignored here */
mu[2]=p[2]; sig[1]=p[3]; sig[2]=p[4];
f[1]=p[1]; f[2]=1-f[1];
cdf = 1 - f[1]* CDFNormal(-(x-mu[1])/sig[1])/CDFNormal(mu[1]/sig[1])
- f[2]* CDFNormal(-(x-mu[2])/sig[2])/CDFNormal(mu[2]/sig[2]);
break;
case(NS3normal): /* half-normal at 0 plus two truncated normals */
mu[2]=p[2]; sig[0]=p[3]; sig[1]=p[4]; sig[2]=p[5];
if(LASTROUND) { f[0]=p[0]; f[1]=p[1]; }
else { z=(f[0]=exp(p[0]))+(f[1]=exp(p[1]))+1; f[0]/=z; f[1]/=z;} /* back-transform proportions */
f[2]=1-f[0]-f[1];
cdf = 1 - f[0]* 2*CDFNormal(-x/sig[0])
- f[1]* CDFNormal(-(x-mu[1])/sig[1])/CDFNormal(mu[1]/sig[1])
- f[2]* CDFNormal(-(x-mu[2])/sig[2])/CDFNormal(mu[2]/sig[2]);
break;
}
return(cdf);
}
void GetSNphysical(double pi[], double *Sphysical, double *Nphysical, double *S4)
{
/* this calculates the synonymous and nonsynonymous sites according to the
physical-site definition (Yang 2006 Computational Molecular Evolution, Section 2.5.4).
S and N are sites per codon.
It is not clear how to deal with stop codons.
pi[]: frequencies of the sense codons (indexed as in FROM61).
*S4: frequency-weighted proportion of codons whose third position is
four-fold degenerate.
On return *Sphysical + *Nphysical is rescaled to 3 (sites per codon).
*/
int i,j,k, ic,b[3], aa0,aa1, *code=GeneticCode[com.icode];
int by[3]={16,4,1}, nstop,s,n;
double y;
for(i=0,*Sphysical=*Nphysical=*S4=0; i<com.ncode; i++) {
ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4; /* decode codon into its 3 bases */
/* no need to check the first and second positions here */
if(FourFold[b[0]][b[1]]) *S4 += pi[i];
aa0=code[ic];
/* enumerate the 9 single-nucleotide mutations of this codon */
for(j=0,s=n=nstop=0; j<3; j++) FOR(k,3) {
aa1 = code[ic + ((b[j]+k+1)%4 - b[j])*by[j]];
if(aa1==-1) nstop++; /* mutation to a stop codon: counted in neither s nor n */
else if(aa0==aa1) s++;
else n++;
}
/* s + n ~= 9 */
*Sphysical += pi[i]*s/9.*3.;
*Nphysical += pi[i]*n/9.*3.;
}
y = (*Sphysical + *Nphysical)/3; /* rescale so that S + N = 3 sites per codon */
*Sphysical /= y; *Nphysical /= y;
}
double GetOmega (int aa1, int aa2, double omega, double pomega[])
{
/* this gets the omega (w) value under different models for EigenQcodon().
aa1, aa2: the two amino acids involved in the replacement.
omega: the single omega value, used when com.aaDist==0.
pomega: model-specific omega parameters; note that a negative entry is
clamped to 0 in place (side effect on the caller's array).
*/
double w=1, fit1,fit2;
int k;
if (com.aaDist==AAClasses) {
if (aa1<aa2) { k=aa2; aa2=aa1; aa1=k; } /* order aa1>aa2 for the triangular index */
k=aa1*(aa1-1)/2+aa2;
if (pomega[OmegaAA[k]]<0) { /* guard against a negative omega from the optimizer */
if (noisy) printf("aa1 & aa2 & iw & w: %d %d %d %.5f\n",
aa1,aa2,OmegaAA[k],pomega[OmegaAA[k]]);
pomega[OmegaAA[k]]=0;
}
if (com.seqtype==AAseq && com.nrate>65 && aa1*20+aa2==ijAAref)
; /* if estimating grantham's matrix with aa sequences */
else w = pomega[OmegaAA[k]];
}
else if (com.aaDist==0) w = omega; /* NSsites==0 or >0 */
else if (com.aaDist<=6) { /* chemical properties: a & b */
w = pomega[0]*com.daa[aa1*20+aa2];
if(com.aaDist>0) w = exp(-w); /* geometric */
else w = 1-w; /* linear */
if (com.seqtype==CODONseq) w *= pomega[1];
}
else if (com.aaDist>=FIT1) { /* ap,p*,av,v* (and w0 for FIT2) */
/* quadratic fitness of each amino acid from two chemical properties */
fit1 = -pomega[0]*square(AAchem[0][aa1]-pomega[1])
-pomega[2]*square(AAchem[1][aa1]-pomega[3]);
fit2 = -pomega[0]*square(AAchem[0][aa2]-pomega[1])
-pomega[2]*square(AAchem[1][aa2]-pomega[3]);
w = exp(-fit1-fit2);
if(com.aaDist==FIT2) w *= pomega[4];
}
return(w);
}
double GetMutationMultiplier (int i, int j, int pos, int from[3], int to[3])
{
/* This sets the mutation-bias multipliers for F1x4MG, F3x4MG, FMutSel0, FMutSel.
com.pi[], com.pf3x4[], and com.piAA[] are set correctly before this routine is called.
i, j: indices of the two sense codons; pos: the codon position that differs;
from[], to[]: the three bases of each codon.
*/
int n=com.ncode, b1,b2;
double q, eFit1, eFit2, small=min2(1e-6, 1./com.ls);
/* b1 and b2 are the 2 unchanged positions */
if (pos==0) { b1=1; b2=2; }
else if(pos==1) { b1=2; b2=0; }
else { b1=0; b2=1; }
q = 1 / (com.pf3x4[b1*4+to[b1]] * com.pf3x4[b2*4+to[b2]]); /* MG-style: divide out the two unchanged positions */
if(com.npi && (com.codonf==FMutSel || com.codonf==FMutSel0)) {
/* eFit1/eFit2: codon frequency over its mutational expectation; pi is
bounded away from 0 by `small` to keep the logs finite */
eFit1 = max2(com.pi[i], small);
eFit2 = max2(com.pi[j], small);
eFit1 /= com.pf3x4[from[0]] * com.pf3x4[from[1]] * com.pf3x4[from[2]];
eFit2 /= com.pf3x4[ to[0]] * com.pf3x4[ to[1]] * com.pf3x4[to[2]];
if(fabs(eFit2-eFit1)>1e-10)
q *= (log(eFit2)-log(eFit1))/(eFit2-eFit1);
else
q /= eFit2; /* limit of the factor above as eFit2 -> eFit1 */
}
return(q);
}
int SelectionCoefficients (FILE* fout, double kappa[], double ppi[], double omega)
{
/* This calculates the distribution of S or 2Ns under the FMutSel or FMutSel0 models.
Qsubw[] is not correct if (com.NSsites) and the results are not printed.
kappa[]: transition/transversion or GTR exchangeability parameters.
ppi[]: base frequencies; only the first three are read (the fourth slot of
fb[] is set to 1 below).
A per-codon-pair table and histograms are written to fout; summary means
are appended to frst1. Returns 0; calls error2() if com.codonf is wrong.
*/
int n=Nsensecodon, i,j,k, ic1,ic2,b1,b2;
int ndiff,pos=0,from[3],to[3];
double q, summut=0, summutp=0, sumsub=0, sumsubw=0, eF1,eF2, fb[4];
double bigS=2, sumbadmut=0,sumgoodmut=0;
double Qmut[NCODE*NCODE], Qsub[NCODE*NCODE], Qsubw[NCODE*NCODE], Ns[NCODE*NCODE], mNs=0,mNsp=0,mNsn=0;
double maxNs=0, fNsMut[50]={0}, fNsSub[50]={0}, fNsSubw[50]={0}, small=min2(1e-6, 1./com.ls);
int ncat=21; /* number of histogram bins; must stay <= 50 (array sizes above) */
if(com.codonf<FMutSel0)
error2("codonf incorrect");
fprintf(fout, "\nI\tJ\tij\t2Ns_IJ\tpMut_IJ\tpSub_IJ\t2Ns_JI\tpMut_JI\tpSub_JI\n\n");
fb[0]=ppi[0]; fb[1]=ppi[1]; fb[2]=ppi[2]; fb[3]=1;
for (i=0;i<n*n;i++) Qmut[i]=Qsub[i]=Qsubw[i]=0;
for (i=1; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<i; j++) {
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
for(k=0,ndiff=0; k<3; k++)
if(from[k]!=to[k]) { ndiff++; pos=k; }
if(ndiff!=1) continue; /* only single-nucleotide changes have nonzero rate */
q = 1;
if(com.hkyREV) { /* REV-GTR model */
b1 = min2(from[pos],to[pos]); /* b1 and b2 are changed nucleotides */
b2 = max2(from[pos],to[pos]);
if (b1==0 && b2==1) q = kappa[0]; /* TC or CT, relative to AG */
else if (b1==0 && b2==2) q = kappa[1]; /* TA or AT */
else if (b1==0 && b2==3) q = kappa[2]; /* TG or GT */
else if (b1==1 && b2==2) q = kappa[3]; /* CA or AC */
else if (b1==1 && b2==3) q = kappa[4]; /* CG or GC */
}
else { /* HKY model */
if(from[pos]+to[pos]==1 || from[pos]+to[pos]==5)
q = kappa[0];
}
/* scaled selection coefficient 2Ns from the frequency/mutation ratio */
eF1 = max2(com.pi[i], small) / (fb[from[0]] * fb[from[1]] * fb[from[2]]);
eF2 = max2(com.pi[j], small) / (fb[ to[0]] * fb[ to[1]] * fb[to[2]]);
Ns[i*n+j] = log(eF2/eF1);
Ns[j*n+i] = -Ns[i*n+j];
if(maxNs < fabs(Ns[i*n+j])) maxNs = fabs(Ns[i*n+j]);
Qmut[i*n+j] = Qsub[i*n+j] = com.pi[i] * q * fb[ to[pos]];
Qmut[j*n+i] = Qsub[j*n+i] = com.pi[j] * q * fb[from[pos]];
if(fabs(Ns[i*n+j]) > 1e-20) { /* non-neutral mutations */
Qsub[i*n+j] *= Ns[i*n+j]/(1 - exp(-Ns[i*n+j])); /* fixation-probability factor */
Qsub[j*n+i] *= Ns[j*n+i]/(1 - exp(-Ns[j*n+i]));
}
Qsubw[i*n+j] = Qsub[i*n+j];
Qsubw[j*n+i] = Qsub[j*n+i];
if(!com.NSsites && GeneticCode[com.icode][ic1] != GeneticCode[com.icode][ic2]) {
Qsubw[i*n+j] *= com.omega; /* nonsynonymous change: apply omega */
Qsubw[j*n+i] *= com.omega;
}
summut += Qmut[i*n+j] + Qmut[j*n+i];
sumsub += Qsub[i*n+j] + Qsub[j*n+i];
sumsubw += Qsubw[i*n+j] + Qsubw[j*n+i];
if(fabs(Ns[i*n+j]) > 1e-20) { /* non-neutral mutations */
summutp += (Ns[i*n+j]>0 ? Qmut[i*n+j] : Qmut[j*n+i]);
mNsp += (Ns[i*n+j]>0 ? Qmut[i*n+j]*Ns[i*n+j] : Qmut[j*n+i]*Ns[j*n+i]);
mNsn += (Ns[i*n+j]<0 ? Qmut[i*n+j]*Ns[i*n+j] : Qmut[j*n+i]*Ns[j*n+i]);
}
else { /* neutral mutation. Ns = 0 makes no contribution to mNsp & mNsn */
summutp += (Qmut[i*n+j] + Qmut[j*n+i])/2;
}
mNs += (Qmut[i*n+j]+Qmut[j*n+i])*fabs(Ns[i*n+j]);
if (fabs(Ns[i*n+j])>bigS) { /* strongly selected mutations: |2Ns| > bigS */
if (Ns[i*n+j]>0) {
sumgoodmut += Qmut[i*n+j];
sumbadmut += Qmut[j*n+i];
}
else {
sumgoodmut += Qmut[j*n+i];
sumbadmut += Qmut[i*n+j];
}
}
fprintf(fout, "%c%c%c\t", BASEs[from[0]],BASEs[from[1]],BASEs[from[2]]);
fprintf(fout, "%c%c%c\t", BASEs[ to[0]],BASEs[ to[1]],BASEs[ to[2]]);
fprintf(fout, "%c%c", BASEs[from[pos]],BASEs[to[pos]]);
fprintf(fout, "\t%.5f\t%.5f\t%.5f", Ns[i*n+j], Qmut[i*n+j], Qsub[i*n+j]);
fprintf(fout, "\t%.5f\t%.5f\t%.5f", Ns[j*n+i], Qmut[j*n+i], Qsub[j*n+i]);
if(!com.NSsites)
fprintf(fout, "\t%.5f\t%.5f", Qsubw[i*n+j], Qsubw[j*n+i]);
FPN(fout);
} /* for (j) */
} /* for (i) */
/* normalize the accumulated sums into proportions and means */
sumgoodmut /= summut;
sumbadmut /= summut;
mNs /= summut;
mNsp /= summutp;
mNsn /= summut-summutp;
fprintf(fout, "\n\nHistograms\n2Ns\tFMut\tFSub(CodonUsage)\tFSubw(after w)\n\n");
for(i=0; i<n; i++) {
for(j=0; j<n; j++) {
if(Qmut[i*n+j] == 0) continue;
/* locate the histogram bin for Ns[i*n+j] on (-maxNs, maxNs) */
for(k=0; k<ncat-1; k++) {
if(Ns[i*n+j] < (-1 + (k+1.)*2/ncat)*maxNs) break;
}
fNsMut[k] += Qmut[i*n+j]/summut;
fNsSub[k] += Qsub[i*n+j]/sumsub;
fNsSubw[k] += Qsubw[i*n+j]/sumsubw;
}
}
for(k=0; k<ncat; k++) {
fprintf(fout, "%.5f\t%.5f\t%.5f", (-1 + (k+0.5)*2/ncat)*maxNs, fNsMut[k], fNsSub[k]);
if(!com.NSsites)
fprintf(fout, "\t%.5f", fNsSubw[k]);
FPN(fout);
}
fprintf(fout, "\nProportion of advantageous (S > 0) mutations:\n %.5f\n", summutp/summut);
fprintf(fout, "\nProportions of good & bad mutations (|S| > %.4f) among mutations:\n%.5f %.5f\n",
bigS, sumgoodmut, sumbadmut);
fprintf(fout, "\nmean |Ns| = %.5f\tmean Ns+ = %.5f\tmean Ns- = %.5f\n", mNs,mNsp,mNsn);
fprintf(frst1, "\t%.4f\t%.4f\t%.4f", mNs, mNsp, mNsn);
return(0);
}
int EigenQcodon (int mode, double blength, double *S, double *dS, double *dN,
double Root[], double U[], double V[], double *meanrate, double kappa[], double omega, double Q[])
{
/* This contructs the rate matrix Q for codon substitution and gets the eigen
values and vectors if getstats==0, or get statistics (dS & dN etc.) if
getstats==1.
The routine is also called by Qcodon2aa for mechanistic amino acid
substitution models.
Input parameters are kappa, omega and com.pi (or com.fb61).
Statistics calculated include S, dS & dN.
c0[0,1,2] and c[0,1,2] are rates for the 3 codon positions before and after
selection. c4 is for 4-fold rates. ts[3] and tv[3] are transition/
transversion rates for the three codon positions, not calculated.
mode=0: construct Q; 1: calculate UVRoot; 2:calculate statistics
*Qfactor or *meanrate:
=0 means that Q is scaled as usual;
<0 means that the scale factor will be calculated and returned
>0 the given scale factor is applied (1 means no scaling).
Note that under NSsites or branch&site models, scaling is done for all Q
matrices for the whole branch.
aaDist=FIT1 & FIT2: ap,p*,av,v*, (and w0 for FIT2)
The argument omega is used only if the model assumes one omega. For
AAClasses, com.pomega is used instead.
*/
int n=Nsensecodon, i,j,k, ic1,ic2,aa1,aa2, b1,b2;
int ndiff,pos=0,from[3],to[3];
double q, mr, rs0,ra0,rs,ra, y;
double Sphysical, Nphysical, S4, dSnew, dNnew;
double d4=0, d0[3], d[3], ts[3], tv[3]; /* rates at positions and 4-fold sites */
double *pi=(com.seqtype==AAseq?com.fb61:com.pi), w=-1, piQij;
double space[NCODE*(NCODE+1)];
/* Delete this after the MutSel project. */
static int times=0;  /* counts calls since the last mode==1 call (pairwise output below) */
if(mode==1) times=0;
else times++;
NEigenQ++;
if(blength>=0 && (S==NULL||dS==NULL||dN==NULL)) error2("EigenQcodon");
/* construct Q: off-diagonal rates for single-nucleotide codon changes */
for (i=0;i<n*n;i++) Q[i]=0;
for (i=1; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<i; j++) {
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
for(k=0,ndiff=0; k<3; k++)
if(from[k]!=to[k]) { ndiff++; pos=k; }
if(ndiff!=1) continue; /* changes at 2 or 3 positions have rate 0 */
q = 1;
if(com.hkyREV) { /* REV-GTR model */
b1 = min2(from[pos],to[pos]); /* b1 and b2 are changed nucleotides */
b2 = max2(from[pos],to[pos]);
if (b1==0 && b2==1) q = kappa[0]; /* TC or CT, relative to AG */
else if (b1==0 && b2==2) q = kappa[1]; /* TA or AT */
else if (b1==0 && b2==3) q = kappa[2]; /* TG or GT */
else if (b1==1 && b2==2) q = kappa[3]; /* CA or AC */
else if (b1==1 && b2==3) q = kappa[4]; /* CG or GC */
}
else { /* HKY model */
if(from[pos]+to[pos]==1 || from[pos]+to[pos]==5) /* transition: T<->C (0+1) or A<->G (2+3) */
q = kappa[0];
}
if (com.codonf>=F1x4MG && com.codonf<=FMutSel && com.codonf!=Fcodon)
q *= GetMutationMultiplier (i, j, pos, from, to);
aa1 = GeneticCode[com.icode][ic1];
aa2 = GeneticCode[com.icode][ic2];
if(aa1 != aa2) /* nonsynonymous change: multiply by omega */
q *= GetOmega(aa1, aa2, omega, com.pomega);
Q[i*n+j] = q*pi[j];
Q[j*n+i] = q*pi[i];
} /* for (j) */
} /* for (i) */
/* diagonal elements; mr = -sum_i pi_i Q_ii is the mean substitution rate */
for (i=0; i<n; i++)
Q[i*n+i] = -sum(Q+i*n,n);
for (i=0,mr=0; i<n; i++)
mr -= pi[i]*Q[i*n+i];
if(mode==1) { /* get Root, U, & V */
if (com.seqtype==AAseq) return (0);
eigenQREV(Q, pi, n, Root, U, V, space);
if(*meanrate>= 0) { /* apply scaling if meanrate>0 */
if(*meanrate>0)
mr = *meanrate;
for (i=0; i<n; i++)
Root[i] /= mr;
}
}
else if(mode==2) { /* get statistics */
for(i=0;i<3;i++) d[i] = d0[i] = ts[i] = tv[i]=0;
rs0 = ra0 = rs = ra = 0;
for (i=0; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<n; j++) {
if(i==j || Q[i*n+j]==0) continue;
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
aa1 = GeneticCode[com.icode][ic1];
aa2 = GeneticCode[com.icode][ic2];
for(k=0,ndiff=0; k<3; k++)
if(from[k] != to[k]) { ndiff++; pos=k; }
if(ndiff!=1) error2("jgl");
piQij = pi[i]*Q[i*n+j]; /* substitution flux from codon i to codon j */
if(pos==2 && FourFold[to[0]][to[1]])
d4 += piQij;
if(aa1==aa2) { /* synonymous flux */
rs += piQij;
d0[pos] += piQij;
}
else { /* nonsynonymous flux; dividing by w recovers the pre-selection rate */
ra += piQij;
w = GetOmega(aa1, aa2, omega, com.pomega);
ra0 += piQij/w;
d0[pos] += piQij/w;
}
d[pos] += piQij;
} /* for (j) */
} /* for (i) */
if(fabs(mr-(rs+ra)) > 1e-6)
error2("mr should be = rs+ra");
rs0 = rs;
w = (rs0+ra0); rs0 /= w; ra0 /= w; *S = rs0*3*com.ls; /* proportions of syn & nonsyn sites */
if(com.NSsites==0 && blength>=0) { /* calculates dS & dN */
if(blength==0) *dS = *dN = 0;
rs /= mr;
ra /= mr;
*dS = blength*rs/(3*rs0);
*dN = blength*ra/(3*ra0);
w = (*dS>0 ? *dN/ *dS : -1);
GetSNphysical(com.pi, &Sphysical, &Nphysical, &S4);
for(i=0;i<3;i++) {
d[i] *= blength/mr;
d0[i] *= blength/mr;
}
d4 *= blength/mr/S4;
dNnew = blength*ra/Nphysical; /* dN & dS under the physical-site definition */
dSnew = blength*rs/Sphysical;
if(noisy>=9) {
printf("\nd123[*] =%9.5f%9.5f%9.5f average%9.5f\n", d[0],d[1],d[2], (d[0]+d[1]+d[2])/3);
printf( " [B] =%9.5f%9.5f%9.5f average%9.5f\n", d0[0],d0[1],d0[2], (d0[0]+d0[1]+d0[2])/3);
printf("accept =%9.5f%9.5f%9.5f\n\n", d[0]/d0[0],d[1]/d0[1],d[2]/d0[2]);
printf("w =%9.5f dN =%9.5f dS =%9.5f d4 =%9.5f (%.1f four-fold sites)\n", w, *dN,*dS, d4, S4*com.ls);
printf("%12s dN*=%9.5f dS*=%9.5f S* =%7.2f N* =%7.2f\n", "", dNnew, dSnew, Sphysical*com.ls, Nphysical*com.ls);
}
/* print out dN* dS* d4 d3B */
if(com.verbose && times==1 && com.ns==2)
fprintf(frst1, "\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f",
*dN*2, *dS*2, dNnew*2, dSnew*2, d0[2]*2, d4*2);
}
else if (com.NSsites) {
*dS = rs/(rs0*3);
*dN = ra/(ra0*3);
}
}
if(*meanrate<0) *meanrate = mr; /* return the computed scale factor to the caller */
return(0);
}
int EigenQaa (FILE *fout, double Root[], double U[], double V[], double rate[])
{
/* Codon-based AA model must use FromCodon, even if com.aaDist==AAClasses.
Builds the 20x20 amino acid rate matrix Q according to com.model, computes
its eigen-decomposition into Root, U, V, and scales Root so the mean rate
is 1. If fout is given and com.model>=REVaa_0, the symmetrical part of the
rate matrix is also printed to fout, frst1, and AAratefile.dat.
*/
int naa=20, i,j,k;
double Q[20*20], mr=0, t=0;
double space[NCODE*NCODE*2+NCODE],*Qc=space+NCODE*NCODE, *space_pisqrt=Qc+NCODE*NCODE;
char aa3[4]="", AAratefile[96]="AAratefile.dat";
FILE *fAArate;
for(i=0; i<naa*naa; i++) Q[i]=0;
switch (com.model) {
case (Poisson) : case (EqualInput) : /* all exchangeabilities equal */
fillxc (Q, 1., naa*naa); break;
case (Empirical) : case (Empirical_F): /* empirical matrix read into com.daa */
for(i=0; i<naa; i++) for(j=0; j<i; j++)
Q[i*naa+j]=Q[j*naa+i]=com.daa[i*naa+j];
break;
case (FromCodon): /* EigenQcodon check mode value */
EigenQcodon(0,-1,NULL,NULL,NULL,Root,U,V, &mr,
(com.hkyREV||com.codonf==FMutSel?rate:&com.kappa),com.omega,Qc);
Qcodon2aa(Qc, com.fb61, Q, space);
break;
case (REVaa_0) : /* only one-step exchangeabilities estimated; reference pair fixed at 1 */
for (i=1,k=0; i<naa; i++)
for (j=0; j<i; j++)
if (AA1STEP[i*(i-1)/2+j] && i*naa+j!=ijAAref)
Q[i*naa+j] = Q[j*naa+i] = rate[k++];
k = ijAAref;
Q[(k/naa)*naa+k%naa] = Q[(k%naa)*naa+k/naa] = 1;
break;
case (REVaa) : /* all exchangeabilities estimated; reference pair fixed at 1 */
for (i=0,k=0; i<naa; i++)
for (j=0; j<i; j++)
if (i*naa+j != ijAAref) Q[i*naa+j] = Q[j*naa+i] = rate[k++];
Q[ijAAref] = Q[(ijAAref%naa)*naa+(ijAAref/naa)] = 1;
break;
}
/* multiply by target frequencies and fill the diagonal */
for(i=0; i<naa; i++) for(j=0; j<naa; j++)
Q[i*naa+j] *= com.pi[j];
for (i=0,mr=0; i<naa; i++) {
Q[i*naa+i] = 0;
Q[i*naa+i] = -sum(Q+i*naa,naa);
mr -= com.pi[i]*Q[i*naa+i];
}
if (fout && com.model>=REVaa_0) {
printf("\nAA substitution rate matrix printed into %s\n", AAratefile);
fAArate=(FILE*)gfopen(AAratefile,"w");
fprintf (fout, "\n\nRate matrix (symmetrical part, Sij)\n");
/* t: average exchangeability over all pairs, used to scale the printout */
for(i=0,t=0; i<naa; i++) {
if(com.pi[i]==0) error2("EigenQaa: do this now");
for(j=0; j<i; j++)
t += Q[i*naa+j]/com.pi[j]/(naa*(naa-1)/2.);
}
for(i=0; i<naa; i++) {
fprintf (fout, "\n%-5s", getAAstr(aa3,i));
for(j=0; j<i; j++) fprintf(fout, " %8.2f", Q[i*naa+j]/t/com.pi[j]*100);
for(j=0; j<i; j++) fprintf(fAArate, " %8.2f", Q[i*naa+j]/t/com.pi[j]*100);
FPN(fAArate);
}
fputs("\n ",fout);
for(i=0; i<naa; i++)
fprintf(fout,"%5s", getAAstr(aa3,i));
FPN(fout);
fflush(fout);
matout(fAArate, com.pi, 1, naa);
for(i=0; i<naa; i++)
fprintf(fAArate,"%12s", getAAstr(aa3,i));
FPN(fAArate);
fprintf(fAArate,"\n\nNote: Amino acid rate matrix estimated from %s\n", com.seqf);
fclose(fAArate);
}
if (fout && frst1 && com.model>=REVaa_0) {
fprintf(frst1, "\nRate matrix (symmetrical part, Sij) for bubble plot\n");
for(i=0; i<naa; i++) for(j=0; j<i; j++)
fprintf(frst1, "\t%d\t%d\t%.2f\n", i+1,j+1,Q[i*naa+j]/t/com.pi[j]*100);
}
eigenQREV(Q, com.pi, naa, Root, U, V, space_pisqrt);
for(i=0; i<naa; i++)
Root[i] = Root[i]/mr; /* scale eigenvalues so the mean rate is 1 */
return (0);
}
int Qcodon2aa (double Qc[], double pic[], double Qaa[], double piaa[])
{
/* Collapses the codon rate matrix Qc (with codon frequencies pic[]) into
the amino acid rate matrix Qaa by aggregating codon states that encode
the same amino acid. Both chains are time reversible, so only the
symmetrical part is filled:
Qaa(a,b) = SUMi SUMj (pic[i]*pic[j]*Qc[i][j]) / (piaa[a]*piaa[b])
piaa[] receives the amino acid frequencies implied by pic[] (which are
assigned equal frequencies for synonymous codons in AA2Codonf()).
*/
int ic, jc, a, b, ncodon=Nsensecodon, naa=20;
double wic, contrib;

zero(piaa, naa);
zero(Qaa, naa*naa);
/* amino acid frequency = sum of frequencies of its codons */
for(ic=0; ic<ncodon; ic++)
piaa[GeneticCode[com.icode][FROM61[ic]]] += pic[ic];
for(ic=0; ic<ncodon; ic++) {
a = GeneticCode[com.icode][FROM61[ic]];
wic = (piaa[a]==0 ? 0 : pic[ic]/piaa[a]); /* weight of codon ic within amino acid a */
for(jc=0; jc<ic; jc++) {
b = GeneticCode[com.icode][FROM61[jc]];
if(b==a || Qc[ic*ncodon+jc]==0) continue; /* synonymous or zero-rate pairs contribute nothing */
if(piaa[b]==0)
contrib = 0;
else
contrib = wic*pic[jc]*Qc[ic*ncodon+jc]/piaa[b];
Qaa[a*naa+b] += contrib;
Qaa[b*naa+a] += contrib;
}
}
return (0);
}
/* GPU-accelerated section: CUDA kernels that speed up the conditional-probability calculation. */
__global__ void TCconP(double *PMat_d, double *conP_d, char *z_d, int pos0, int pos1, int n)
{
/* Tip contribution for clean (unambiguous) data: one thread per
(site, state) entry for sites in [pos0, pos1). Each thread multiplies
its conP entry by the transition probability into the observed tip
state z_d[site]. Launch with a 1-D grid covering n*(pos1-pos0) threads;
extra threads fall through the bounds check.
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int nentry = n * (pos1 - pos0);
if (tid < nentry)
{
int h = pos0 * n + tid; /* flat index into conP; h/n is the site, h%n the state */
/* if other functions in parallel with this might have to atomicMult */
conP_d[h] *= PMat_d[ (h % n) * n + z_d[h / n] ];
}
}
__global__ void TnCconP(double *PMat_d, double *conP_d, char *z_d, int pos0, int pos1, int n, double t)
{
/* Stub, intentionally empty: intended GPU counterpart of the unclean-tip
branch of ConditionalPNode() (ambiguity codes), not yet implemented.
Launching it is a no-op. */
}
int ConditionalPNode (int inode, int igene, double x[])
{
/* Post-order computation of the conditional probability vector conP of
inode over the site patterns [com.posG[igene], com.posG[igene+1]) of
gene igene, using the pruning algorithm. Sons whose conP is out of
date are recomputed recursively first. x[] carries the parameters used
by GetBranchRate()/GetPMatBranch(). Always returns 0.
*/
int n=com.ncode, i,j,k,h, ison, pos0=com.posG[igene], pos1=com.posG[igene+1];
double t;
/* recurse into internal sons whose conP is not flagged as up to date */
for(i=0; i<nodes[inode].nson; i++)
if(nodes[nodes[inode].sons[i]].nson>0 && !com.oldconP[nodes[inode].sons[i]])
ConditionalPNode(nodes[inode].sons[i], igene, x);
/* initialize conP: 0 for a sequenced (tip) node, 1 for an internal node */
if(inode<com.ns)
for(h=pos0*n; h<pos1*n; h++)
nodes[inode].conP[h] = 0; /* young ancestor */
else
for(h=pos0*n; h<pos1*n; h++)
nodes[inode].conP[h] = 1;
if (com.cleandata && inode<com.ns) /* clean tip: set the observed state to 1 */
for(h=pos0; h<pos1; h++)
nodes[inode].conP[h*n+com.z[inode][h]] = 1;
for (i=0; i<nodes[inode].nson; i++) {
ison = nodes[inode].sons[i];
/* branch length for this son, adjusted by site rate and clock/gene rates */
t = nodes[ison].branch * _rateSite;
if(com.clock<5) {
if(com.clock) t *= GetBranchRate(igene,(int)nodes[ison].label,x,NULL);
else t *= com.rgene[igene];
}
GetPMatBranch(PMat, x, t, ison);
if (nodes[ison].nson<1 && com.cleandata) { /* tip && clean */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++)
nodes[inode].conP[h*n+j] *= PMat[j*n+com.z[ison][h]];
}
else if (nodes[ison].nson<1 && !com.cleandata) { /* tip & unclean */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++) {
/* sum PMat over all states compatible with the ambiguity code */
for(k=0,t=0; k<nChara[com.z[ison][h]]; k++)
t += PMat[j*n+CharaMap[com.z[ison][h]][k]];
nodes[inode].conP[h*n+j] *= t;
}
}
else { /* internal node */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++) {
for(k=0,t=0; k<n; k++)
t += PMat[j*n+k]*nodes[ison].conP[h*n+k];
nodes[inode].conP[h*n+j] *= t;
}
}
} /* for (ison) */
if(com.NnodeScale && com.nodeScale[inode])
NodeScale(inode, pos0, pos1); /* rescale conP to avoid floating-point underflow */
return (0);
}
int PMatJC69like (double P[], double t, int n)
{
int i;
double pii=1./n+(1.-1./n)*exp(-n/(n-1.)*t), pij=(1.-pii)/(n-1.);
for(i=0; i<n*n; i++) P[i] = pij;
for(i=0; i<n; i++) P[i*n+i] = pii;
return (0);
}
double Fcodon_3x4 (double fcodon[], double fb3x4[]);
void OutFb3x4(FILE*fout, double fb3x4[]);
void CountCodons (FILE *fout,double fcodonsg[],double fb3x4sg[],double fb4g[]);
double Fcodon_3x4(double fcodon[], double fb3x4[])
{
/* Collapses the 64 codon frequencies in fcodon[] into the position x base
(3x4) table fb3x4[12], normalizing each codon position to sum to 1.
Returns 0 on success, -1 if some position has (near-)zero total mass,
in which case fcodon is also dumped to F0 for inspection.
*/
int pos, k, base[3], status=0;
double total;

zero(fb3x4, 12);
for(k=0; k<64; k++) {
/* decode codon index k into its three bases and accumulate per position */
base[0]=k/16; base[1]=(k%16)/4; base[2]=k%4;
for(pos=0; pos<3; pos++)
fb3x4[pos*4+base[pos]] += fcodon[k];
}
for(pos=0; pos<3; pos++) {
total = sum(fb3x4+pos*4, 4);
if(total<1e-15) status=-1; /* empty position: flag, but still normalize below */
abyx(1/total, fb3x4+pos*4, 4);
}
if(status)
matout (F0, fcodon, 16, 4);
return(status);
}
void OutFb3x4 (FILE*fout, double fb3x4[])
{
/* Prints the codon-position x base (3x4) frequency table fb3x4[12] to
fout: one line per codon position, then a line averaging the three
positions for each base. */
int j,k;
for(j=0; j<3; j++) {
fprintf(fout, "\nposition %2d:", j+1);
for(k=0;k<4;k++)
fprintf(fout,"%5c:%7.5f", BASEs[k],fb3x4[j*4+k]);
}
fprintf(fout,"\nAverage ");
for(k=0; k<4; k++)
fprintf(fout,"%5c:%7.5f", BASEs[k],(fb3x4[0*4+k]+fb3x4[1*4+k]+fb3x4[2*4+k])/3);
}
void CountCodons (FILE *fout,double fcodonsg[],double fb3x4sg[],double fb4g[])
{
/* Outputs codon counts and f3x4 tables, called from InitializeCodon(), where
more notes are found.
Counts are accumulated per species first (rows of fcodonsg), then
recounted per gene, with the grand total in row com.ngene. Codons
containing any ambiguity are skipped in these counts.
*/
int h, j,k, nc=NCODE, ig, wname=15, nb[3], ib[3][4], ic;
/* counts codons for output, species first, genes next */
fputs("Codon usage in sequences\n",fout);
zero(fcodonsg, com.ns*nc);
for(j=0; j<com.ns; j++) {
for(h=0; h<com.npatt; h++) {
for(k=0; k<3; k++)
NucListall(CODONs[com.z[j][h]][k], &nb[k], ib[k]);
k = nb[0]*nb[1]*nb[2]; /* number of codons compatible with this pattern */
if(k>1) continue; /* ambiguous codon: not counted */
ic = ib[0][0]*16+ib[1][0]*4+ib[2][0];
fcodonsg[j*nc+ic] += com.fpatt[h];
}
Fcodon_3x4(fcodonsg+j*nc, fb3x4sg+j*12);
}
printcums(fout, com.ns, fcodonsg, com.icode);
fputs("Codon position x base (3x4) table for each sequence.",fout);
for(j=0; j<com.ns; j++) {
fprintf (fout,"\n\n#%d: %-*s", j+1,wname,com.spname[j]);
OutFb3x4(fout, fb3x4sg+j*12);
}
/* recount the same data, this time grouped by gene */
zero(fcodonsg, (com.ngene+1)*nc);
zero(fb4g, (com.ngene+1)*4);
for(ig=0; ig<com.ngene; ig++) {
for(j=0; j<com.ns; j++) {
for(h=com.posG[ig]; h<com.posG[ig+1]; h++) {
for(k=0; k<3; k++)
NucListall(CODONs[com.z[j][h]][k], &nb[k], ib[k]);
k = nb[0]*nb[1]*nb[2];
if(k>1) continue;
ic = ib[0][0]*16+ib[1][0]*4+ib[2][0];
fcodonsg[ig*nc+ic] += com.fpatt[h];
}
}
if(Fcodon_3x4(fcodonsg+ig*nc, fb3x4sg+ig*12)) {
printf("Gene %d seems empty.", ig+1);
exit(-1);
}
}
if(com.ngene>1) {
fputs("\n\nCodon usage in genes\n",fout);
printcums(fout, com.ngene, fcodonsg, com.icode);
fputs("Codon position x base (3x4) table for each gene.\n",fout);
for(ig=0; ig<com.ngene; ig++) {
fprintf (fout,"\n\nGene #%d", ig+1);
OutFb3x4(fout, fb3x4sg+ig*12);
}
}
/* grand totals over genes go into row com.ngene */
for(ig=0; ig<com.ngene; ig++)
for(k=0;k<nc;k++) fcodonsg[com.ngene*nc+k]+=fcodonsg[ig*nc+k];
Fcodon_3x4(fcodonsg+com.ngene*nc, fb3x4sg+com.ngene*12);
for(ig=0; ig<com.ngene+1; ig++)
for(j=0;j<3;j++) for(k=0;k<4;k++) fb4g[ig*4+k]+=fb3x4sg[ig*12+j*4+k]/3;
fputs("\n\nSums of codon usage counts",fout);
printcu(fout, fcodonsg+com.ngene*nc, com.icode);
if(!com.cleandata) fputs("\n(Ambiguity data are not used in the counts.)\n",fout);
fputs("\n\nCodon position x base (3x4) table, overall\n",fout);
OutFb3x4(fout, fb3x4sg+com.ngene*12);
{
/* NOTE(review): GC3 here is C+G frequency averaged over all three codon
positions, not third-position GC as the name suggests — confirm intent. */
double *fb3x4 = fb3x4sg+com.ngene*12, GC3;
GC3 = (fb3x4[0*4+1] + fb3x4[1*4+1] + fb3x4[2*4+1])/3
+ (fb3x4[0*4+3] + fb3x4[1*4+3] + fb3x4[2*4+3])/3;
fprintf(frst1, "\t%.4f", GC3);
}
}
void AddCodonFreqSeqGene (int js, int ig, double fcodon0[], double fcodon[],
double fb3x40[], double fb3x4[],
double fb40[], double fb4[]);
void AddCodonFreqSeqGene (int js, int ig, double fcodon0[], double fcodon[],
double fb3x40[], double fb3x4[],
double fb40[], double fb4[])
{
/* This adds codon and nucleotide counts in sequence js in gene ig to fcodon,
fb3x4, and fb4, using fcodon0, fb3x40, and fb40 to resolve ambiguities
Similar to AddFreqSeqGene().
fcodon0/fb3x40/fb40 hold the frequencies from the previous iteration of
the caller's loop and serve as weights for distributing ambiguous
observations (an EM-style update).
*/
int h, k, i0,i1,i2, nc=NCODE;
int nb[3],ib[3][4],ic=-1;
double t,t1;
char str[4]=" ", codon[4]=" ", ft[64];
for(h=com.posG[ig]; h<com.posG[ig+1]; h++) {
for(k=0; k<3; k++)
NucListall(CODONs[com.z[js][h]][k], &nb[k], ib[k]);
k = nb[0]*nb[1]*nb[2]; /* NOTE(review): dead store — overwritten by the loop below */
for(k=0; k<3; k++) { /* f3x4 & f1x4, no regard for stop codons */
for(i0=0,t=t1=0; i0<nb[k]; i0++) {
t += fb3x40[k*4+ib[k][i0]];
t1 += fb40[ib[k][i0]];
}
for(i0=0; i0<nb[k]; i0++) {
/* distribute the pattern weight over compatible bases, proportional to prior freqs */
fb3x4[k*4+ib[k][i0]] += com.fpatt[h] * fb3x40[k*4+ib[k][i0]]/t;
fb4[ib[k][i0]] += com.fpatt[h]* fb40[ib[k][i0]]/t1;
}
}
/* mark all sense codons compatible with the pattern (ft), weighted by fcodon0 */
for(i0=0; i0<64; i0++) ft[i0]=0;
for(i0=k=0,t=0; i0<nb[0]; i0++) FOR(i1,nb[1]) FOR(i2,nb[2]) {
ic = ib[0][i0]*16+ib[1][i1]*4+ib[2][i2];
if(FROM64[ic]==-1) continue; /* skip stop codons */
ft[ic] = 1; k++;
t += fcodon0[ic];
}
if(k==0) printf("%s in seq. %d is stop (icode=%d)\n",
getcodon(str,ic),js+1,com.icode);
if(t<1e-100)
printf("difficulty in resolving codon %s.\n", codon);
for(ic=0; ic<nc; ic++) if(ft[ic])
fcodon[ic] += (t>0 ? com.fpatt[h]*fcodon0[ic]/t : com.fpatt[h]/k);
}
}
int InitializeCodon (FILE *fout, double space[])
{
/* Count codons for genes, calculate site patterns and fpatt.
Sequences com.z[] are not coded and may contain ambiguity characters
Space requirement for fcodonsg & fb3x4sg: max(ngene+1,ns)*(64+12+4).
First we count codons for output, with ambiguity characters ignored.
Then we recount to resolve ambiguity characters, to be used for ML
calculation later on.
set up com.pi[NCODE], com.piG[NGENE][64], according to com.codonf
com.pi[] has freqs for all codon sites in the seqs if ngene>1.
Space use is not economical as com.piG and fcodonsg are separate and
duplicated.
*/
int j,k, nc=NCODE, ig, ic[3], wrongorder[4]={2,1,3,0};
int irf,nrf=20; /* nrf: max iterations for resolving ambiguities */
double *fcodonsg=space, *fb3x4sg=space+max2((com.ngene+1),com.ns)*nc;
double *fb4g=space+(com.ngene+1)*(64+12);
double *ppi, fcodon0[64],fb3x40[12],fb40[4], d1,d2,d3;
/* counts codons for output, species first, genes next */
if(noisy) puts("Counting codons..");
CountCodons(fout, fcodonsg, fb3x4sg, fb4g);
/* Now to count fcodonsg, fb3x4sg, fb4g, to set up pi's for ML calculation.
Three iterations are going on at the same time.
*/
if (com.codonf!=Fequal && !com.cleandata) { /* iteration to resolve ambiguities */
for(ig=0; ig<com.ngene; ig++) { /* calculate com.piG[] */
/* seed the iteration with the ambiguity-ignoring counts for this gene */
axtoy(1/sum(fcodonsg+ig*nc,nc), fcodonsg+ig*nc, fcodon0, nc);
xtoy(fb3x4sg+ig*12, fb3x40, 12);
xtoy(fb4g+ig*4, fb40, 4);
for(irf=0; irf<nrf; irf++) {
zero(fcodonsg + ig*nc, nc);
zero(fb3x4sg + ig*12, 12);
zero(fb4g+ig*4, 4);
for(j=0; j<com.ns; j++) {
AddCodonFreqSeqGene (j, ig, fcodon0, fcodonsg+ig*nc,
fb3x40, fb3x4sg+ig*12, fb40, fb4g+ig*4);
}
/* renormalize each table, then test convergence against the previous round */
abyx(1/sum(fcodonsg+ig*nc,nc), fcodonsg + ig*nc, nc);
for(k=0; k<3; k++)
abyx(1/sum(fb3x4sg+ig*12+k*4,4), fb3x4sg+ig*12+k*4, 4);
abyx(1/sum(fb4g+ig*4,4), fb4g+ig*4, 4);
d1 = distance(fcodonsg+ig*nc, fcodon0, nc);
d2 = distance(fb3x4sg+ig*12, fb3x40, 12);
d3 = distance(fb4g+ig*4, fb40, 4);
if(d1<1e-8 && d2<1e-8 && d3<1e-8)
break;
xtoy(fcodonsg+ig*nc, fcodon0, nc);
xtoy(fb3x4sg+ig*12, fb3x40, 12);
xtoy(fb4g+ig*4, fb40, 4);
} /* for(irf) */
} /* for(ig) */
/* same iteration again for the overall frequencies (row com.ngene) */
axtoy(1/sum(fcodonsg+com.ngene*nc,nc), fcodonsg+com.ngene*nc, fcodon0, nc);
xtoy(fb3x4sg+com.ngene*12, fb3x40, 12);
xtoy(fb4g+com.ngene*4, fb40, 4);
for(irf=0; irf<nrf; irf++) { /* calculate com.pi[] */
zero(fcodonsg + com.ngene*nc, nc);
zero(fb3x4sg + com.ngene*12, 12);
zero(fb4g + com.ngene*4, 4);
for(ig=0; ig<com.ngene; ig++)
for(j=0; j<com.ns; j++) {
AddCodonFreqSeqGene(j, ig, fcodon0, fcodonsg+com.ngene*nc,
fb3x40, fb3x4sg+com.ngene*12, fb40, fb4g+com.ngene*4);
}
abyx(1/sum(fcodonsg+com.ngene*nc,nc), fcodonsg+com.ngene*nc, nc);
for(k=0;k<3;k++)
abyx(1/sum(fb3x4sg+com.ngene*12+k*4,4), fb3x4sg+com.ngene*12+k*4, 4);
abyx(1/sum(fb4g+com.ngene*4,4), fb4g+com.ngene*4, 4);
d1 = distance(fcodonsg+com.ngene*nc, fcodon0, nc);
d2 = distance(fb3x4sg+com.ngene*12, fb3x40, 12);
d3 = distance(fb4g+com.ngene*4, fb40, 4);
if(d1<1e-8 && d2<1e-8 && d3<1e-8) break;
xtoy(fcodonsg+com.ngene*nc, fcodon0, nc);
xtoy(fb3x4sg+com.ngene*12, fb3x40, 12);
xtoy(fb4g+com.ngene*4, fb40, 4);
} /* for(irf) */
}
/* edit com.pi & com.piG according to com.codonf */
for(ig=0; ig<com.ngene+1; ig++) {
ppi = (ig<com.ngene?com.piG[ig]:com.pi);
zero(ppi, nc);
if (com.codonf==Fequal)
fillxc(ppi,1,com.ncode);
else if (com.codonf==Fcodon || com.codonf==FMutSel0 || com.codonf==FMutSel) {
/* observed codon frequencies, mapped to sense-codon indexing */
for(k=0; k<nc; k++)
if(FROM64[k]>-1) ppi[FROM64[k]] = fcodonsg[ig*nc+k];
}
else if (com.codonf==F3x4 || com.codonf==F3x4MG) {
/* product of position-specific base frequencies */
for(k=0; k<nc; k++)
if(FROM64[k]>-1)
ppi[FROM64[k]] = fb3x4sg[ig*12+k/16]*fb3x4sg[ig*12+4+(k/4)%4]*fb3x4sg[ig*12+8+k%4];
}
else if (com.codonf==F1x4 || com.codonf==F1x4MG) {
/* product of overall base frequencies */
for(k=0; k<nc; k++)
if(FROM64[k]>-1)
ppi[FROM64[k]] = fb4g[ig*4+k/16]*fb4g[ig*4+(k/4)%4]*fb4g[ig*4+k%4];
}
abyx(1/sum(ppi,com.ncode), ppi, com.ncode); /* ncode != nc */
if(ig<com.ngene) {
if (com.codonf>=F1x4 && com.codonf<=FMutSel)
xtoy(fb3x4sg+ig*12, com.f3x4[ig], 12);
/* write 1x4 tables into 3x4 tables */
if (com.codonf==FMutSel0 || com.codonf==FMutSel || com.codonf==F1x4 || com.codonf==F1x4MG) {
for(k=0; k<4; k++) {
d1 = com.f3x4[ig][0*4+k] + com.f3x4[ig][1*4+k] + com.f3x4[ig][2*4+k];
for(j=0; j<3; j++)
com.f3x4[ig][j*4+k] = d1/3;
}
}
}
}
if(com.codonf==FMutSel0) {
/* amino acid frequencies implied by the codon frequencies */
for(j=0,zero(com.piAA,20); j<com.ncode; j++)
com.piAA[GeneticCode[com.icode][FROM61[j]]] += com.pi[j];
matout(F0, com.piAA, 1, 20);
}
if(com.codonf>=F1x4 && com.codonf<=FMutSel)
com.pf3x4 = com.f3x4[0];
if(com.verbose && com.ngene==1) {
fprintf(fout,"\n\nCodon frequencies under model, for use in evolver (TTT TTC TTA TTG ... GGG):\n");
for(k=0; k<64; k++) {
fprintf(fout,"%12.8f",GeneticCode[com.icode][k]==-1?0:com.pi[FROM64[k]]);
if((k+1)%4==0) FPN(fout);
}
/*
fprintf(fout, "\nWrong order: AAA AAC AAG AAT ... TTT\n");
for(k=0; k<64; k++) {
ic[0] = wrongorder[k/16];
ic[1] = wrongorder[(k/4)%4];
ic[2] = wrongorder[k%4];
j = ic[0]*16+ic[1]*4+ic[2];
if(GeneticCode[com.icode][j]!=-1)
fprintf(fout,"%.8f, ", com.pi[FROM64[j]]);
}
exit(0);
*/
}
return(0);
}
int AA2Codonf(double faa[20], double fcodon[])
{
/* Derives codon frequencies fcodon[] (sense-codon indexing, 64 slots) from
amino acid frequencies faa[], spreading each amino acid's frequency
equally among its synonymous codons. Used in codon-based amino acid
substitution models. Prints a warning if the result does not sum to 1.
*/
int ic, iaa, i, NCsyn[20];

/* NCsyn[aa]: number of sense codons encoding each amino acid */
for(i=0; i<20; i++) NCsyn[i] = 0;
for(ic=0; ic<64; ic++) {
iaa = GeneticCode[com.icode][ic];
if(iaa != -1) NCsyn[iaa]++;
}
zero(fcodon, 64);
for(ic=0; ic<Nsensecodon; ic++) {
iaa = GeneticCode[com.icode][FROM61[ic]];
fcodon[ic] += faa[iaa]/NCsyn[iaa];
}
if(fabs(1-sum(fcodon,64))>1e-6) printf("\n1 == %12.7f\n", sum(fcodon,64));
return (0);
}
int DistanceMatAA (FILE *fout)
{
int i,j, h;
double p, lst;
if(fout) fprintf(fout,"\nAA distances (raw proportions of different sites)\n");
for(h=0,lst=0; h<com.npatt; h++) lst+=com.fpatt[h];
FOR(i, com.ns) {
if(fout) fprintf(fout, "\n%-15s", com.spname[i]);
FOR(j,i) {
for(h=0,p=0; h<com.npatt; h++)
if (com.z[i][h] != com.z[j][h]) p += com.fpatt[h];
p /= lst;
SeqDistance[i*(i-1)/2+j]=p;
if(fout) fprintf(fout, " %7.4f", p);
}
}
if(fout) FPN(fout);
return (0);
}
int GetDaa (FILE* fout, double daa[])
{
/* Get the amino acid distance (or substitution rate) matrix
(grantham, dayhoff, jones, etc).
Reads the lower triangle of a 20x20 matrix from com.daafile into daa[],
mirrors it to the upper triangle, then rescales it depending on whether
a codon model (divide by the max distance) or an amino acid model
(scale relative to the reference pair ijAAref) is in use. For
Empirical models the amino acid frequencies are also read into com.pi.
*/
FILE * fdaa;
char aa3[4]="";
int i,j, naa=20;
double dmax=0, dmin=1e40;
if(noisy>3) printf("\n\nReading matrix from %s", com.daafile);
if (com.model==REVaa_0||com.model==REVaa) puts(", to get initial values.");
fdaa = gfopen(com.daafile, "r");
for (i=0; i<naa; i++)
for (j=0,daa[i*naa+i]=0; j<i; j++) {
fscanf(fdaa, "%lf", &daa[i*naa+j]);
daa[j*naa+i] = daa[i*naa+j]; /* mirror to the upper triangle */
if (dmax<daa[i*naa+j]) dmax = daa[i*naa+j];
if (dmin>daa[i*naa+j]) dmin = daa[i*naa+j];
}
if(com.aaDist && (com.seqtype==1||com.model==FromCodon)) { /* codon model */
if(noisy) printf("\ndistance: %.2f --- %.2f\n", dmin, dmax);
/* NOTE(review): scales com.daa in place although the matrix was read into
the daa[] parameter; callers appear to pass com.daa here — confirm. */
for(i=0; i<naa; i++)
for(j=0; j<naa; j++)
com.daa[i*naa+j] /= dmax;
}
else if (com.seqtype==AAseq) {
/* normalize so the reference pair ijAAref has exchangeability 1 */
for(i=0; i<naa; i++)
for(j=0; j<i; j++)
if(i*naa+j!=ijAAref)
daa[j*naa+i] = daa[i*naa+j] /= com.daa[ijAAref];
daa[ijAAref] = daa[(ijAAref%naa)*naa+(ijAAref/naa)] = 1;
if(com.model==Empirical) {
/* empirical matrices carry equilibrium frequencies after the matrix */
for(i=0; i<naa; i++)
if(fscanf(fdaa,"%lf",&com.pi[i])!=1)
error2("aaRatefile");
if (fabs(1-sum(com.pi,20))>1e-5) {
printf("\nSum of freq. = %.6f != 1 in aaRateFile\n", sum(com.pi,naa));
exit(-1);
}
}
}
fclose(fdaa);
if(fout) {
fprintf (fout, "\n%s\n", com.daafile);
for(i=0; i<naa; i++) {
fprintf (fout, "\n%4s", getAAstr(aa3,i));
for(j=0; j<i; j++)
fprintf (fout, "%5.0f", daa[i*naa+j]);
}
FPN (fout);
}
/*
SetAA1STEP();
for(i=0,FPN(frst);i<naa;i++,FPN(frst))
FOR(j,i) fprintf(frst,"%3d",AA1STEP[i*(i-1)/2+j]);
for(i=0,k=0;i<naa;i++)
FOR(j,i) if(AA1STEP[i*(i-1)/2+j]) {
fprintf(frst,"%c%c\t%.2f\n",AAs[i],AAs[j],com.daa[i*naa+j]);
k++;
}
fprintf(frst,"\n%d one-step amino acid pairs\n", k);
exit (0);
*/
return (0);
}
int SetAA1STEP (void)
{
/* Sets the global variable AA1STEP[19*20/2].
   Sets com.nrate for models like AAClasses and REVaa_0.
   AA1STEP[k] marks the k_th pair of amino acids that differ at one position,
   Q[i*naa+j] is the k_th nonzero element if AA1STEP[k]=i*naa+j;
   Lower diagonal of Q is visited, with i>j.
*/
   int ncode0=com.ncode, nc, naa=20, i,j,k, ic1,ic2, ndiff, from[3],to[3];
   int *Q=(int*)PMat;   /* PMat reused here as integer scratch space */

   setmark_61_64();
   /* restore com.ncode in case setmark_61_64() modified it */
   nc=Nsensecodon; com.ncode=ncode0;
   for(i=0; i<naa*naa; i++) Q[i]=0;
   /* for every aa pair, count codon pairs differing at exactly one position */
   for (i=0; i<nc; i++)
      for(j=0; j<i; j++) {
         ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
         ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
         for (k=0,ndiff=0; k<3; k++) if (from[k]!=to[k]) ndiff++;
         if (ndiff!=1) continue;
         ic1 = GeneticCode[com.icode][ic1];
         ic2 = GeneticCode[com.icode][ic2];
         Q[ic1*naa+ic2]++;
         Q[ic2*naa+ic1]++;
      }
/*
#if DEBUG
      for (i=0,FPN(F0); i<naa; i++,FPN(F0)) FOR(j,i)printf("%3d",Q[i*naa+j]);
#endif
*/
   /* mark the one-step-reachable aa pairs; k counts them */
   for (i=0,k=0; i<naa; i++)
      for(j=0; j<i; j++) {
         if (Q[i*naa+j]>0) { AA1STEP[i*(i-1)/2+j] = 1; k++; }
         else AA1STEP[i*(i-1)/2+j] = 0;
      }
/*
   for(i=0,FPN(F0);i<naa;i++,FPN(F0)) FOR(j,i)printf("%3d",AA1STEP[i*(i-1)/2+j]);
*/
   if(com.seqtype==2) com.nrate = k-1; /* one element (ijAAref) is fixed */
   return(0);
}
int GetOmegaAA (int OmegaAA[])
{
/* This routine reads the file OmegaAA.dat to initialize the
   lower diagonal matrix OmegaAA, which specifies the aa substitution
   rate classes.  To be used with the codon substitution model
   AAClasses, which specifies several classes of the dN/dS ratio.
   OmegaAA[iaa*(iaa-1)/2+jaa]= -1 if no one-step change is possible;
                             = 0 for the first, background, class
                             = i (1,..,nclass) if iaa and jaa are in class i
*/
   char *OmegaAAf="OmegaAA.dat", line[1024];
   FILE *fin=NULL;
   int iomega, n1step=0, i,j,k, iaa,jaa, npair, naa=20, nline=1024;

   /* default: one-step pairs go into background class 0, others get -1 */
   for(i=0,n1step=0; i<naa; i++) for(j=0; j<i; j++)
      if (AA1STEP[i*(i-1)/2+j]) { OmegaAA[i*(i-1)/2+j] = 0; n1step++; }
      else OmegaAA[i*(i-1)/2+j] = -1;
   if (noisy) {
      printf("\n\n%d one-step aa pairs.\n", n1step);
      printf("Reading omega class from %s.\n", OmegaAAf);
   }
   com.nOmegaType = -1;
   fin=fopen(OmegaAAf,"r");
   if(fin) fscanf(fin, "%d", &com.nOmegaType);
   if (com.nOmegaType<1 || com.nOmegaType>65-1) {
      /* no usable file: give every one-step pair its own omega class */
      if (com.seqtype!=CODONseq) puts("\nTo be tested.\a");
      com.nOmegaType=0;
      if (com.seqtype==AAseq) {
         for(i=0; i<naa; i++) for(j=0; j<i; j++) if(i*naa+j != ijAAref && AA1STEP[i*(i-1)/2+j])
            OmegaAA[i*(i-1)/2+j] = com.nOmegaType++;
      }
      else
         for(i=0; i<naa; i++) for(j=0; j<i; j++)
            if(AA1STEP[i*(i-1)/2+j]) OmegaAA[i*(i-1)/2+j] = com.nOmegaType++;
      printf("%d dN/dS ratios estimated from data.\n",com.nOmegaType);
   }
   else {
      /* parse class definitions: each line is "k: <pairs of aa letters>" */
      printf("%d dN/dS ratios estimated from data.\n",com.nOmegaType);
      for(iomega=0; iomega<com.nOmegaType-1; iomega++) {
         fscanf(fin, "%d", &j);
         if (j!=iomega+1) { printf("err data file %s.", OmegaAAf); exit(-1); }
         printf ("\nClass #%d: ", j);
         j = fgetc (fin); if (j!=':') error2("err expecting :");
         fgets (line, nline, fin);
         printf ("%s\n", line);
         for (j=0,npair=0; j<nline-1 && line[j] && line[j]!='\n'; j++) {
            iaa = line[j];
            if (!isalpha(iaa)) continue;
            jaa = line[++j]; if(!isalpha(jaa)) error2("err jaa");
            npair++;
            printf ("\npair %2d: |%c%c| ", npair, iaa,jaa);
            iaa=CodeChara((char)iaa,AAseq); jaa=CodeChara((char)jaa,AAseq);
            if(iaa<0||iaa>19||jaa<0||jaa>19) error2("aa not found");
            if (iaa<jaa) { k=jaa, jaa=iaa; iaa=k; }   /* ensure iaa > jaa */
            printf ("|%c%c (%2d,%2d)| ", AAs[iaa], AAs[jaa],iaa,jaa);
            if (iaa==jaa) puts("This pair has no effect.");
            if (OmegaAA[iaa*(iaa-1)/2+jaa]==-1) {
               puts("\nThis pair cannot change in one step and is ignored!");
               continue;
            }
            else if (OmegaAA[iaa*(iaa-1)/2+jaa])
               error2("This pair has already been specified?");
            OmegaAA[iaa*(iaa-1)/2+jaa]=iomega+1;
            printf (" in class %d ",iomega+1);
         }
      }
   }
   if(fin) fclose(fin);
   /* rate count: kappa parameter(s) plus one omega per class */
   com.nrate = com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
   com.nrate += (com.nOmega = com.nOmegaType);
/*
   for (i=0; i<naa; i++,FPN(F0))
      for(j=0; j<i; j++) printf ("%3d", OmegaAA[i*(i-1)/2+j]);
*/
   return (0);
}
int GetCodonFreqs2 (void)
{
/* Recalculate the expected codon frequencies (com.pi[]) according to the
   frequency model selected by com.codonf, starting from the observed codon
   frequencies already stored in com.pi[].  com.pi[] is thus both input
   (observed frequencies) and output (expected frequencies under the model).
   This is used by PairwiseCodon().
*/
   int n=com.ncode, i, pos, ic, b[3];
   double *pi=com.pi, fb3x4[12], fb4[4], GC[3]={0};

   if (com.codonf==Fequal) {
      fillxc(pi, 1./n, n);
      return 0;
   }
   if (com.codonf==Fcodon || com.codonf==FMutSel)
      return 0;                       /* observed frequencies used as is */

   /* tally position-specific (fb3x4) and overall (fb4) base frequencies */
   zero(fb3x4, 12);
   zero(fb4, 4);
   for (i=0; i<n; i++) {
      ic = FROM61[i];
      b[0] = ic/16;  b[1] = (ic/4)%4;  b[2] = ic%4;
      for (pos=0; pos<3; pos++) {
         fb3x4[pos*4+b[pos]] += pi[i];
         fb4[b[pos]] += pi[i]/3.;
      }
   }
   /* expected codon frequency = product of its three base frequencies */
   for (i=0; i<n; i++) {
      ic = FROM61[i];
      b[0] = ic/16;  b[1] = (ic/4)%4;  b[2] = ic%4;
      if (com.codonf==F3x4 || com.codonf==F3x4MG)
         pi[i] = fb3x4[b[0]] * fb3x4[4+b[1]] * fb3x4[8+b[2]];
      else
         pi[i] = fb4[b[0]] * fb4[b[1]] * fb4[b[2]];
   }
   if (com.codonf==F1x4MG) {
      for (pos=0; pos<3; pos++)
         xtoy(fb4, com.pf3x4+pos*4, 4);
   }
   else if (com.codonf==F3x4MG)
      xtoy(fb3x4, com.pf3x4, 12);
   abyx(1./sum(pi,n), pi, n);         /* renormalize to sum to 1 */
   /* per-position GC% kept for the commented-out debugging output below */
   GC[0] = (fb3x4[0+1]+fb3x4[0+3])*100;
   GC[1] = (fb3x4[4+1]+fb3x4[4+3])*100;
   GC[2] = (fb3x4[8+1]+fb3x4[8+3])*100;
   /* fprintf(frst1, "\tGC123\t%.1f\t%.1f\t%.1f", GC[0],GC[1],GC[2]); */
   return 0;
}
double lfun2dSdN (double x[], int np)
{
/* Negative log-likelihood function for calculating dS and dN between 2
   sequences, com.z[0] & com.z[1]:
      f(i,j) = \pi_i * p_{ij}(t)
   x[0] is the distance t; the following elements hold the kappa (or
   REV/FMutSel rate) parameters and omega, depending on model settings.
   Data are clean and coded.
   Transition probability pijt is calculated for observed patterns only.
*/
   int n=com.ncode, h,i,k, ik, z0,z1;
   double lnL=0, fh,expt[NCODE], mr=0;

   NFunCall++;
   /* unpack rate parameters from x[] into com.kappa/com.pkappa/com.omega */
   k=1, ik=0;
   if(com.hkyREV==0) {
      if(com.fix_kappa==1) { com.pkappa[0] = com.kappa; ik = 1; }
      else com.kappa = x[k]; /* Is this necessary? */
   }
   for(i=0; i<(com.hkyREV ? 5 : !com.fix_kappa); i++)
      com.pkappa[ik++] = x[k++];
   if(com.codonf==FMutSel)
      for(i=0; i<3; i++)
         com.pkappa[ik++] = x[k++];
   if(!com.fix_omega) com.omega = x[1+com.nkappa];
   /* rebuild the eigensystem of Q only when rate parameters can change */
   if(!com.fix_kappa || !com.fix_omega)
      EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa, com.omega,PMat);
   for(k=0; k<n; k++)
      expt[k] = exp(x[0]*Root[k]);
   for (h=0; h<com.npatt; h++) {
      if(com.fpatt[h]<1e-20) continue;      /* skip patterns with ~zero count */
      z0 = com.z[0][h];
      z1 = com.z[1][h];
      for(k=0,fh=0;k<n;k++)
         fh += U[z0*n+k]*expt[k]*V[k*n+z1];  /* p_{z0,z1}(t) from eigensystem */
      fh *= com.pi[z0];
      if(fh<=0) {
         /* numerical rounding can drive fh <= 0; clamp to keep log() finite */
         matout(F0,x,1,np);
         printf("lfun2dSdN: fh = %.9f\n",fh);
         fh = 1e-70;
      }
      lnL -= log(fh) * com.fpatt[h];
   }
   return (lnL);
}
int VariancedSdN (double t, double omega, double vtw[2*2], double vdSdN[2*2])
{
/* This calculates the covariance matrix of dS & dN, using the
   difference approximation, from the covariance matrix of t and
   omega (vtw).  com.kappa and com.pi are used.  Sampling errors
   in parameters other than t and omega, such as kappa and pi[],
   are ignored.
      JacobiSN = {{dS/dt, dS/dw}, {dN/dt,dN/dw}}
   and vdSdN = JacobiSN * vtw * JacobiSN' (delta method).
   Returns -1 (with vdSdN zeroed) if vtw has a nonpositive diagonal.
*/
   int np=2;
   double JacobiSN[2*2],T1[2*3],T2[2*3], S,dS,dN, dS1,dN1,dS2,dN2, eh, mr=0;

   if(vtw[0]<=0 || vtw[3]<=0) {
      puts("var(dS,dN) not calculable.");
      zero(vdSdN,4);
      return(-1);
   }
   /* printf("\nt & w: %.5f %.5f\n", t, omega);
      matout(F0,vtw, 2,2); */
   EigenQcodon(2,t,&S,&dS,&dN,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
   /* central differences with respect to t */
   eh = (t+1)*Small_Diff;
   EigenQcodon(2,t+eh,&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
   EigenQcodon(2,t-eh,&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
   JacobiSN[0*np+0] = (dS1 - dS2)/(2*eh);
   JacobiSN[1*np+0] = (dN1 - dN2)/(2*eh);
   /* central differences with respect to omega */
   eh = (omega+1)*Small_Diff;
   EigenQcodon(2,t,&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,omega+eh,PMat);
   EigenQcodon(2,t,&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,omega-eh,PMat);
   JacobiSN[0*np+1] = (dS1 - dS2)/(2*eh);
   JacobiSN[1*np+1] = (dN1 - dN2)/(2*eh);
   /* vdSdN = J * vtw * J' */
   matby(JacobiSN,vtw,T1,2,2,2);
   mattransp2 (JacobiSN, T2, 2, 2);
   matby(T1,T2,vdSdN,2,2,2);
   /* matout(F0,vdSdN, 2,2); */
   return (0);
}
double distanceHKY85 (double x[], double *kappa, double alpha);
int distance3pos(double dHKY[], double kHKY[], int *sites4, char *z1, char *z2);
int distance3pos(double dHKY[], double kHKY[], int *sites4, char *z1, char *z2)
{
/* Nucleotide-based HKY85 distances between two coded protein-coding DNA
   sequences z1 and z2: dHKY[0..2] for the three codon positions and
   dHKY[3] for third positions at four-fold degenerate sites.
   com.cleandata = 1 is assumed.
*/
   int i, j, h, pos, c1, c2, b1[3], b2[3];
   double fij[4][16]={{0}}, pi4[4]={0};
   /* fij[0..2]: base-pair frequencies at codon positions 1-3;
      fij[3]: third-position pairs at four-fold degenerate sites */
   for (h=0; h<com.npatt; h++) {
      c1 = FROM61[(int)z1[h]];
      b1[0] = c1/16;  b1[1] = (c1/4)%4;  b1[2] = c1%4;
      c2 = FROM61[(int)z2[h]];
      b2[0] = c2/16;  b2[1] = (c2/4)%4;  b2[2] = c2%4;
      for (pos=0; pos<3; pos++)
         fij[pos][b1[pos]*4+b2[pos]] += com.fpatt[h]/com.ls;
      /* four-fold site: first two positions identical and degenerate */
      if (b1[0]==b2[0] && b1[1]==b2[1] && FourFold[b2[0]][b2[1]])
         fij[3][b1[2]*4+b2[2]] += com.fpatt[h];
   }
   *sites4 = (int) sum(fij[3], 16);
   if (*sites4)
      for (pos=0; pos<16; pos++)
         fij[3][pos] /= *sites4;
   /* average base frequencies at four-fold sites (computed but unused) */
   for (i=0; i<4; i++)
      for (j=0; j<4; j++)
         pi4[i] += fij[3][i*4+j]/2;
   for (i=0; i<4; i++)
      for (j=0; j<4; j++)
         pi4[j] += fij[3][i*4+j]/2;
   for (pos=0; pos<4; pos++)
      dHKY[pos] = distanceHKY85(fij[pos], &kHKY[pos], 0);
   return(0);
}
int PairwiseCodon (FILE *fout, FILE*fds, FILE*fdn, FILE*ft, double space[])
{
/* Calculates ds & dn for all pairwise codon sequence comparisons.
   It uses different npatt for different pairs.
   The data com.z[] should be encoded clean data, with ambiguity characters
   removed.  Think of what to do with raw unclean data.
   JacobiSN has two columns, the 1st are derivatives of dS (dS/dt, dS/dk, dS/dw)
   and the second of dN.
   Results are written to fout and to the distance files fds (dS), fdn (dN),
   and ft (t); space[] is optimizer working memory.
*/
   char *pz0[NS],codon[2][3]; /* pz0, npatt0, & fpatt0 hold the old information */
   int npatt0=com.npatt;
   double *fpatt0, ls0=com.ls;
   float fp[NCODE*NCODE];
   int n=com.ncode, is,js,j,k,h, i0,np, wname=15;
   int nb[3],ib[3][4],ic[2], missing=0, sites4;
   double x[10]={.9,1,.5,.5,.5,.5,.3}, xb[10][2]={{1e-5,50}}, large=50;
   double kappab[2]={.01,999}, omegab[2]={.001,99};
   double lnL, e=1e-7, *var=space+NP, S,dS,dN, mr=0;
   double JacobiSN[2*3],T1[2*3],T2[2*3],vSN[2*2], dS1,dN1,dS2,dN2,y[3],eh;
   /* for calculating SEs of dS & dN */
   double dHKY[4], kHKY[4];

   /* save global data (z pointers, fpatt); restored before returning */
   fpatt0=(double*)malloc(npatt0*3*sizeof(double));
   FOR(k,com.ns) pz0[k]=com.z[k];
   com.z[0] = (char*)(fpatt0+npatt0);
   com.z[1] = com.z[0]+npatt0;
   FOR (k,npatt0) fpatt0[k] = (float)com.fpatt[k];
   if(!com.cleandata) puts("\nPairwiseCodon: pairwise deletion.");
   if (com.ngene>1 && com.Mgene==1) puts("ngene>1 to be tested.");
   if (noisy>1) printf("\npairwise comparison (Goldman & Yang 1994).\n");
   fprintf(fout,"\npairwise comparison, codon frequencies: %s.\n",
      codonfreqs[com.codonf]);
   /* bounds for kappa and omega parameters */
   FOR(j,com.nkappa) { xb[1+j][0]=kappab[0]; xb[1+j][1]=kappab[1]; }
   if(!com.fix_omega) { k=1+com.nkappa; xb[k][0]=omegab[0]; xb[k][1]=omegab[1]; }
   fprintf(fds,"%6d\n", com.ns); fprintf(fdn,"%6d\n", com.ns);
   fprintf(ft,"%6d\n", com.ns);
   fprintf(frst, "\n\npairwise comparison (Goldman & Yang 1994)");
   fprintf(frst,
      "\nseq seq N S dN dS dN/dS Paras.\n");
   for(is=0;is<com.ns;is++) {
      fprintf(fds,"%-*s ", wname,com.spname[is]);
      fprintf(fdn,"%-*s ", wname,com.spname[is]);
      fprintf(ft,"%-*s ", wname,com.spname[is]);
      for(js=0; js<is; js++) {
         if(noisy>9) {
            puts("\nInput the pair i & j (i>j) for dN-dS calculation? ");
            scanf("%d%d",&is,&js);
            is--; js--;
            if(is>com.ns || js<0 || is<js) error2("invalid pair");
         }
         if(noisy>1) printf ("\n%4d vs. %3d", is+1, js+1);
         fprintf(fout,"\n\n%d (%s) ... %d (%s)",
            is+1,com.spname[is], js+1,com.spname[js]);
         fprintf (frst, "%3d %3d ", is+1, js+1);
         if(noisy>2) fprintf(frub, "\n\n%d (%s) ... %d (%s)",
            is+1,com.spname[is], js+1,com.spname[js]);
         /* collect codon-pair counts fp[] for this pair of sequences */
         for(k=0; k<n*n; k++) fp[k]=0;
         if(com.cleandata) {
            for(h=0; h<npatt0; h++) {
               j = max2(pz0[is][h],pz0[js][h]);
               k = min2(pz0[is][h],pz0[js][h]);
               fp[j*n+k] += (float)fpatt0[h];
            }
         }
         else {
            /* unclean data: resolve both codons, pairwise-delete ambiguous sites */
            for(h=0,com.ls=0; h<npatt0; h++) {
               FOR(i0,2) FOR(k,3) codon[i0][k] = pz0[i0==0 ? is : js][h*3+k];
               for(i0=0,missing=0; i0<2; i0++) {
                  for(k=0; k<3; k++)
                     NucListall(codon[i0][k], &nb[k], ib[k]);
                  if(nb[0]*nb[1]*nb[2]!=1)
                     { missing=1; break; }
                  else
                     ic[i0] = FROM64[ ib[0][0]*16+ib[1][0]*4+ib[2][0] ];
               }
               if(missing) continue;
               com.ls += (int)fpatt0[h];
               j = max2(ic[0],ic[1]);
               k = min2(ic[0],ic[1]);
               fp[j*n+k] += (float)fpatt0[h];
            }
         }
         /* compress nonzero codon-pair counts into z[0], z[1], fpatt */
         for(j=0,com.npatt=0;j<n;j++) {
            for(k=0; k<j+1; k++)
               if(fp[j*n+k]) {
                  com.z[0][com.npatt] = (char)j;
                  com.z[1][com.npatt] = (char)k;
                  com.fpatt[com.npatt++] = fp[j*n+k];
               }
         }
         if(noisy>2) printf("\n npatt=%d ",com.npatt);
         /* observed codon frequencies for this pair */
         for(j=0,zero(com.pi,n); j<com.npatt; j++) {
            com.pi[(int)com.z[0][j]] += com.fpatt[j]/(2.*com.ls);
            com.pi[(int)com.z[1][j]] += com.fpatt[j]/(2.*com.ls);
         }
         GetCodonFreqs2 ();
         distance3pos(dHKY, kHKY, &sites4, com.z[0], com.z[1]);
         np = com.np = (com.ntime=1) + com.nkappa + !com.fix_omega;
         NFunCall = 0;
         /* initial values and bounds */
         x[0] = SeqDistance[is*(is-1)/2+js]*(0.8+0.3*rndu());
         if(x[0]>3) x[0]=1.5+rndu();
         if(x[0]<1e-6) x[0]=.5*rndu();
         if(com.nkappa==1) { /* HKY type model */
            if(is==0 && js==1) x[1] = (com.icode==1?4:1.5)+rndu();
            else x[1] = (x[1]*2+2+rndu())/3;
            if(x[1]>10) x[1] = 5;
            xb[1][0] = 0.4;
         }
         else /* REV or FMutSel models, do something later */
            for(j=1,x[1]=.8+.4*rndu(); j<com.nkappa; j++)
               x[1+j] = .2+.4*rndu();
         if(!com.fix_omega) {
            k = 1+com.nkappa;
            if(is==0 && js==0) x[k] = 0.2+0.2*rndu();
            else x[k] = (3*x[k]+0.6*rndu())/4;
            x[k] = max2(x[k],0.01);
            x[k] = min2(x[k],2);
         }
         if(noisy>=9) {
            FPN(F0); FOR(k,np) printf(" %12.6f",x[k]); FPN(F0);
            FOR(k,np) printf(" %12.6f",xb[k][0]); FPN(F0);
            FOR(k,np) printf(" %12.6f",xb[k][1]); FPN(F0);
         }
         /* ML estimation of t (and kappa/omega when not fixed) */
         if(com.fix_kappa && com.fix_omega)
            EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa,com.omega,PMat);
         if(np)
            ming2(noisy>3?frub:NULL,&lnL,lfun2dSdN,NULL,x,xb, space,e,np);
         else { x[1]=x[2]=com.kappa=com.omega=0; lnL=0; }
         lnLmodel = lnL;
         fprintf(fout,"\nlnL =%12.6f\n",-lnL);
         FOR(k,np) fprintf(fout," %8.5f",x[k]); FPN(fout);
         if(noisy>2) {
            printf("\n\nt_NG = %.5f\tMLEs: ", SeqDistance[is*(is-1)/2+js]);
            for(k=0;k<np;k++) printf(" %.5f", x[k]);
         }
         /* SEs of parameters from the inverse Hessian */
         if (np && com.getSE) {
            Hessian(np, x, lnL, space, var, lfun2dSdN, var+np*np);
            matinv(var, np, np, var+np*np);
            fprintf(fout,"SEs for parameters:\n");
            FOR(k,np) fprintf(fout," %8.5f",(var[k*np+k]>0.?sqrt(var[k*np+k]):-0));
            FPN(fout);
         }
         FPN(fout);
         /* compute S, dS, dN at the MLEs */
         EigenQcodon(2,x[0],&S,&dS,&dN, NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
         if(noisy>=3) {
            puts("\nNucleotide-based analysis (approximate MLEs; use baseml to get proper MLEs):");
            printf("\ndHKY (123-4):"); FOR (k,4) printf(" %8.5f", dHKY[k]);
            printf("\nkHKY (123-4):"); FOR (k,4) printf(" %8.5f", kHKY[k]);
            printf(" (%d four-fold sites)\n", sites4);
         }
         fprintf(fds," %7.4f",dS); fprintf(fdn," %7.4f",dN);
         fprintf(ft," %7.4f",x[0]);
         fprintf (fout,
            "t= %6.4f S= %7.1f N= %7.1f dN/dS= %7.4f dN =%7.4f dS =%7.4f\n",
            x[0], S, com.ls*3-S, com.omega, dN, dS);
         fprintf(frst,"%8.1f %8.1f %8.4f %8.4f %8.4f", com.ls*3-S, S, dN, dS, com.omega);
         for(k=0; k<np; k++) fprintf(frst," %8.4f",x[k]);
         for(k=0; k<np; k++) fprintf(frst1,"\t%.4f",x[k]);
         fprintf(frst1,"\t%.3f", -lnL);
         fprintf(frst1,"\t%.4f\t%.4f", dN, dS);
         k=np-1;
         if (com.getSE)
            fprintf(frst," +-%6.4f",(var[k*np+k]>0.?sqrt(var[k*np+k]):-1));
         fprintf(frst," %9.3f\n",-lnL);
         /* SEs of dS & dN: delta method via finite-difference Jacobian */
         if(com.getSE && !com.fix_omega) {
            FOR(k, np) {
               FOR(j,np) y[j] = x[j];
               y[k] += (eh=(x[k]+1)*Small_Diff);
               if(!com.fix_kappa) com.kappa = y[1];
               com.omega = y[1+!com.fix_kappa];
               EigenQcodon(2,y[0],&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
               y[k] -= 2*eh;
               if(!com.fix_kappa) com.kappa = y[1];
               com.omega = y[1+!com.fix_kappa];
               EigenQcodon(2,y[0],&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
               JacobiSN[0*np+k] = (dS1-dS2)/(2*eh);
               JacobiSN[1*np+k] = (dN1-dN2)/(2*eh);
            }
            matby(JacobiSN, var, T1, 2, np, np);
            mattransp2(JacobiSN, T2, 2, np);
            matby(T1,T2,vSN,2,np,2);
            /*
            fputs("\nvar(dS,dN):\n", fout);
            matout(fout,vSN,2,2);
            */
            fprintf(fout,"dN = %7.5f +- %.5f dS = %7.5f +- %.5f",
               dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
            fprintf(fout," (by method 1)\n");
            /* method 2: from var(t,omega) only, via VariancedSdN() */
            T1[0] = var[0];
            T1[1] = T1[2] = var[0*np+np-1];
            T1[3] = var[(np-1)*np+(np-1)];
            if(com.getSE && !com.fix_omega)
               VariancedSdN(x[0], x[np-1], T1, vSN);
            fprintf(fout,"dN = %7.5f +- %.5f dS = %7.5f +- %.5f",
               dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
            fprintf(fout," (by method 2)\n");
         }
         fflush(frst); fflush(fout);
      } /* for (js) */
      FPN(fds); FPN(fdn); FPN(ft);
      fflush(fds); fflush(fdn); fflush(ft);
   } /* for (is) */
   /* restore the global data saved at the top */
   com.ls = (int)ls0; FOR(k,com.ns) com.z[k] = pz0[k];
   com.npatt = npatt0; FOR(h,npatt0) com.fpatt[h] = fpatt0[h]; free(fpatt0);
   return (0);
}
double lfun2AA (double t)
{
/* likelihood function for two amino acid sequences
prob(i,j) = PI_i * p(i,j,t)
The data are clean & coded (com.z[0] & com.z[1]).
Transition probability pijt is calculated for observed patterns only.
*/
int n=20, h,k, aa0,aa1;
double lnL=0, pijt,expt[20],al=com.alpha;
if(al==0) FOR(k,n) expt[k] = exp(t*Root[k]);
else FOR(k,n) expt[k] = pow(al/(al-t*Root[k]),al);
for(h=0; h<com.npatt; h++) {
aa0=com.z[0][h]; aa1=com.z[1][h];
for(k=0,pijt=0; k<n; k++)
pijt += U[aa0*n+k] * expt[k]*V[k*n+aa1];
lnL -= log(com.pi[aa0]*pijt)*com.fpatt[h];
}
return(lnL);
}
int _nestS=0;  /* 189= estimate the S elements, 0= use those from com.daa[] */
static double *_Fij;  /* observed aa pair frequencies; set in PairwiseAArev() */
double lfun2AArev (double x[], int np)
{
/* Negative log-likelihood for two aa sequences under a nonstationary
   reversible model, compared against the observed pair frequencies _Fij[].
   np = _nestS + 19*3 + (1 or 2);
   x[]: Q matrix, 3*pi, 1 or 2 blength
   pi[0] is for the root, pi[1] & pi[2] are for Q[1] & Q[2] for the 2 branches.
   See notes in PairwiseAArev().
*/
   int i,j,k, n=20;
   double pi[3][20], *p, Q[3][400], *T=Q[0], *Fe=Q[0], t,t1,t2, m1,m2, lnL=0;
   double space[20*20*2+20*2];

   NFunCall++;
   /* unpack and normalize the three frequency vectors from x[] */
   for(k=0; k<3; k++) {
      for(i=0,p=pi[k],p[19]=1; i<n-1; i++) p[i] = x[_nestS+k*19+i];
      for(i=0,t=0; i<n; i++) t+=p[i];
      for(i=0; i<n; i++) p[i]/=t;
   }
   /* symmetric exchangeabilities: estimated from x[] or taken from com.daa[] */
   if(_nestS) {
      for(i=0,k=0; i<n; i++) {
         for(j=0,Q[1][i*n+i]=0; j<i; j++)
            if(i*n+j != ijAAref)
               Q[1][i*n+j]=Q[1][j*n+i] = x[k++];
      }
      Q[1][ijAAref] = Q[1][(ijAAref%n)*n+(ijAAref/n)] = 1;
   }
   else {
      for(i=0; i<n; i++)
         for(j=0,Q[1][i*n+i]=0; j<=i; j++)
            Q[1][i*n+j] = Q[1][j*n+i] = com.daa[i*n+j];
   }
   /* build the two branch rate matrices (Q[2] before Q[1] is overwritten)
      and their mean rates m1, m2 */
   for(i=0,m1=m2=0; i<n; i++) {
      for(j=0,t1=t2=0;j<n;j++) {
         Q[2][i*n+j] = Q[1][i*n+j]*pi[2][j];
         Q[1][i*n+j] *= pi[1][j];
         t1 += Q[1][i*n+j];
         t2 += Q[2][i*n+j];
      }
      Q[1][i*n+i] = -t1;
      Q[2][i*n+i] = -t2;
      m1 += pi[1][i]*t1;
      m2 += pi[2][i]*t2;
   }
   /* branch lengths: one shared distance split in half, or two distances */
   if(com.ntime==1) { t1 = x[np-1]/2/m1; t2 = x[np-1]/2/m2; }
   else { t1 = x[np-2]/m1; t2 = x[np-1]/m2; }
   PMatQRev(Q[1], pi[1], t1, n, space);   /* Q[1], Q[2] now hold P matrices */
   PMatQRev(Q[2], pi[2], t2, n, space);
   /* expected pair frequencies Fe (aliases Q[0]): sum over root state k of
      pi[0][k] * P1[k][i] * P2[k][j] */
   for(i=0; i<n*n; i++) Fe[i]=0;
   for(k=0;k<n;k++)
      for(i=0;i<n;i++)
         for(j=0,t=pi[0][k]*Q[1][k*n+i]; j<n; j++)
            Fe[i*n+j] += t*Q[2][k*n+j];
   /* if(fabs((t=sum(Fe,n*n))-1)>1e-6) printf("Fe = %.9f != 1\n", t); */
   for(i=0; i<n*n; i++) {
      if(_Fij[i]<=1e-15) continue;
      if(Fe[i]>1e-200)
         lnL -= _Fij[i]*log(Fe[i]);
      else
         printf("Fij_exp = %.10f < 0\n", Fe[i]);
   }
   return(lnL);
}
double PairwiseAArev (int is, int js)
{
/* This calculates pairwise distance under a nonstationary model.  It assumes
   three sets of amino acid frequencies: pi[0] for the root, pi[1] and pi[2]
   are for the Q matrices for branches 1 and 2.
   It estimates the symmetrical part of the rate matrix if _nestS==189.
   If _nestS==0, it uses the symmetrical part read from the com.daa file.
   It can estimate 1 or 2 distances depending on com.ntime=1 or 2.
   np = 189 + 19*3 + (1 or 2);
   x[]: Q matrix, 3*pi, 1 or 2 blength
   Returns the estimated distance x[_nestS + 19*3].
*/
   int n=com.ncode, h,i,j,k, np=_nestS+ 19*3 + 1;
   double Fij[400], x[248], xb[248][2], lnL, e=1e-9, t=0, p[20];

   com.ntime=1; /* 1: t1=t2; 2: t1 and t2 */
   if(com.ntime==2) np++;
   _Fij=Fij;   /* share observed pair frequencies with lfun2AArev() */
   if(com.cleandata!=1) error2("cleandata");
   /* grow the optimizer working space if necessary */
   if(com.sspace < spaceming2(np)) {
      com.sspace = spaceming2(np);
      printf ("\nspace adjusted to %9lu bytes\n",com.sspace);
      if((com.space=(double*)realloc(com.space,com.sspace))==NULL)
         error2("oom space");
   }
   /* observed aa pair frequencies between sequences is and js */
   for(h=0,zero(Fij,n*n); h<com.npatt; h++) {
      Fij[com.z[is][h]*n+com.z[js][h]] += com.fpatt[h]/com.ls;
   }
   /* initial exchangeabilities near com.daa[] values.
      NOTE(review): the loop below sets ALL x[i]=rndu(), overwriting these
      _nestS initial values -- confirm whether that is intended. */
   if(_nestS) {
      for (i=1,k=0; i<n; i++) FOR(j,i)
         if(i*n+j!=ijAAref) x[k++] = (com.daa[i*n+j]+.001)*(0.8+0.4*rndu());
   }
   for(i=0;i<np;i++) {
      x[i]=rndu(); xb[i][0]=1e-5; xb[i][1]=100;
   }
   lnL = lfun2AArev(x,np);
   printf("\nlnL0 = %12.6f\n",-lnL);
   ming2(noisy>2?frub:NULL,&lnL,lfun2AArev,NULL,x,xb, com.space, e, np);
   /* print the three normalized frequency vectors at the optimum */
   for(k=0; k<3; k++) {
      for(i=0,p[19]=1; i<n-1; i++) p[i]=x[_nestS+k*19+i];
      for(i=0,t=0; i<n; i++) t+=p[i];
      for(i=0; i<n; i++) p[i]/=t;
      matout2(F0, p, 1, n, 7, 4);
   }
   return (x[_nestS + 19*3]);
}
int PairwiseAA (FILE *fout, FILE*f2AA)
{
/* Calculates pairwise distances using amino acid seqs.
   Data (com.z[]) are clean and coded.
   com.npatt for the whole data set is used which may be greater than
   the number of patterns for each pair.
   SE is not calculated.
*/
   char *pz0[NS];
   int n=com.ncode, j, is,js;
   double x, xb[2]={0,19}, lnL, step;

   if (com.ngene>1 && com.Mgene==1) error2("ngene>1 to be tested.");
   if (noisy) printf("\npairwise ML distances of AA seqs.\n\n");
/*
   if(com.model>Empirical_F) error2("PairwiseAA: model wrong");
*/
   if(com.model==0) fillxc(com.pi,1./n, n);
   if(com.model>=Empirical) GetDaa(NULL, com.daa);
   /* one eigen-decomposition up front, unless pi is recomputed per pair */
   if(com.model==0 || com.model==Empirical)
      EigenQaa(NULL, Root, U, V, NULL);
   FOR(j,com.ns) pz0[j]=com.z[j];
   fprintf(fout,"\nML distances of aa seqs.\n");
   if(com.alpha)
      fprintf(fout,"\nContinuous gamma with alpha = %.3f is used (ncatG is ignored).\n\n",com.alpha);
   fprintf(f2AA,"%6d\n", com.ns);
   for(is=0; is<com.ns; is++,FPN(F0),FPN(fout),FPN(f2AA)) {
      printf ("%4d vs", is+1);
      fprintf(f2AA,"%-14s ", com.spname[is]);
      fprintf(fout,"%-14s ", com.spname[is]);
      for(js=0; js<is; js++) {
         /* REVaa uses the nonstationary pairwise routine instead */
         if(com.model==REVaa) {
            x = PairwiseAArev(is, js);
            fprintf(f2AA," %7.4f",x); fprintf(fout," %7.4f",x);
            continue;
         }
         com.z[0]=pz0[is]; com.z[1]=pz0[js];
         printf (" %2d", js+1);
         /* +F models: recompute pi from this sequence pair and re-decompose */
         if(com.model==1||com.model==Empirical_F) {
            for (j=0,zero(com.pi,n); j<com.npatt; j++) {
               com.pi[(int)com.z[0][j]]+=com.fpatt[j];
               com.pi[(int)com.z[1][j]]+=com.fpatt[j];
            }
            abyx(1./sum(com.pi,n), com.pi, n);
            EigenQaa(NULL,Root,U,V,NULL);
         }
         /* com.posG[1]=com.npatt; */
         /* one-dimensional ML search over the distance, seeded with the
            raw-proportion distance */
         xb[0]=SeqDistance[is*(is-1)/2+js]; x=xb[0]*1.5; step=xb[0];
         LineSearch(lfun2AA, &lnL, &x, xb, step, 1e-7);
         fprintf(f2AA," %7.4f",x); fprintf(fout," %7.4f",x);
         if (com.getSE) ;
      } /* for (js) */
   } /* for (is) */
   FOR(j,com.ns) com.z[j]=pz0[j];
   return (0);
}
char GetAASiteSpecies(int species, int sitepatt)
{
/* Returns the amino acid character encoded by the codon at site pattern
   sitepatt in species.
   Returns '*' if the ambiguity covers more than one amino acid, or '-' if
   the codon is completely ambiguous (e.g. --- or ***).
   Fix: naa is now initialized to 0.  Previously it was read uninitialized
   at the final (naa>1) test when the ambiguity loop found no sense codon
   (aa stayed -1 and naa was never assigned).
*/
   int n=com.ncode, c, naa=0, k;
   char aa, newaa;

   if(com.seqtype!=1)
      error2("GetAASiteSpecies() right now works for codon seqs only. Check.");
   c = com.z[species][sitepatt];
   if(c<n) {                       /* unambiguous sense codon */
      aa = AAs[ GeneticCode[com.icode][FROM61[c]] ];
   }
   else {   /* ambiguous codon: scan the sense codons compatible with it */
      for(k=0,aa=-1; k<nChara[c]; k++) {
         newaa = GeneticCode[com.icode][FROM61[ CharaMap[c][k] ]];
         if(newaa==-1) continue;   /* skip stop codons */
         newaa = AAs[newaa];
         if(aa==-1) {              /* first amino acid encountered */
            naa = 1;
            aa = newaa;
         }
         else
            if(newaa != aa) naa++;
      }
      if(nChara[c]==n) aa = '-';   /* completely ambiguous codon */
      else if(naa>1) aa = '*';     /* ambiguity spans several amino acids */
   }
   return (aa);
}
int PrintProbNSsites (FILE* frst, double prob[], double meanw[], double varw[], int ncat, int refsp)
{
/* This prints out posterior probabilities that each site is from a site class
   under the NSsites models (model=0).
   This is called by both the old empirical Bayes routine (NEB) and also the new
   Bayes empirical Bayes (BEB) routine.
*/
   int h, hp, it, ir, lst=(com.readpattern?com.npatt:com.ls);
   double psel=0, wpos=1, cutoff=0.5;
   double mpostp[NCATG];
   char *sig, aa;
   char codons[2][4];               /* used only by the commented-out block below */
   double St, Nt, ns, na, ndiff;    /* likewise */

   if(com.model==0) {
      fprintf(frst," & postmean_w");
      if(!BayesEB && com.rK[ncat-1]>1) fprintf(frst," & P(w>1)");
   }
   fprintf(frst,"\n(amino acids refer to 1st sequence: %s)\n\n", com.spname[refsp]);
   zero(mpostp, com.ncatG);
   /* per-site table: class probabilities, best class, mean w (and P(w>1)/SE) */
   for(h=0; h<lst; h++,FPN(frst)) {
      hp = (!com.readpattern ? com.pose[h] : h);
      aa = GetAASiteSpecies(refsp, hp);
      fprintf(frst,"%4d %c ", h+1, aa);
      for (ir=0,it=0,psel=0; ir<ncat; ir++) {
         fprintf(frst," %5.5f", prob[ir*com.npatt+hp]);
         if(prob[ir*com.npatt+hp] > prob[it*com.npatt+hp])
            it = ir;                /* it = most probable class for this site */
         if(!BayesEB && com.model==0)
            if(com.rK[ir] > 1) psel += prob[ir*com.npatt+hp];
         mpostp[ir] += prob[ir*com.npatt+hp]/com.ls;
      }
      fprintf(frst, " (%2d)", it+1);
      if(com.model==0) {
         fprintf(frst, " %6.3f", meanw[hp]);
         if(!BayesEB && psel) fprintf(frst, " %6.3f", psel);
         if(BayesEB==1 && com.model==0)
            fprintf(frst, " +- %6.3f", varw[hp]);
      }
   }
/*
   if(!BayesEB) {
      printf("\nmean posterior probabilities for site classes");
      matout(F0, mpostp, 1, com.ncatG);
      matout(F0, com.freqK, 1, com.ncatG);
   }
*/
   /* list of positively selected sites */
   if(com.model==0) { /* NSsites models */
      if(com.NSsites!=1 && com.NSsites!=7)
         fprintf(frst,"\nPositively selected sites\n\n\tProb(w>1) mean w\n\n");
      /* print the list only if some class has w>1 with nonnegligible freq */
      for(ir=0,it=0; ir<ncat; ir++)
         if(BayesEB==1 || (com.freqK[ir]>.1/com.ls && com.rK[ir]>wpos)) it=1;
      if(!com.aaDist && it) {
         fprintf(fout,"\nPositively selected sites (*: P>95%%; **: P>99%%)\n");
         fprintf(fout,"(amino acids refer to 1st sequence: %s)\n\n", com.spname[refsp]);
         fprintf(fout," Pr(w>1) %25s\n\n", "post mean +- SE for w");
         for(h=0; h<lst; h++) {
            hp=(!com.readpattern ? com.pose[h] : h);
            if(BayesEB==1)
               psel = prob[(ncat-1)*com.npatt+hp];
            else
               for (ir=0,psel=0; ir<ncat; ir++)
                  if(com.rK[ir]>wpos) psel+=prob[ir*com.npatt+hp];
            if(psel>cutoff) {
               sig = " ";
               if(psel>.95) sig = "* ";
               if(psel>.99) sig = "**";
               aa = GetAASiteSpecies(refsp, hp);
               fprintf(fout,"%6d %c %10.3f%-8s %.3f", h+1, aa, psel, sig, meanw[hp]);
               fprintf(frst,"%6d %c %10.3f%-8s %.3f", h+1, aa, psel, sig, meanw[hp]);
               if(BayesEB==1 && com.model==0) {
                  fprintf(fout, " +- %5.3f", varw[hp]);
                  fprintf(frst, " +- %5.3f", varw[hp]);
               }
               /*********** print out both codons if 2 sequences ******/
               /*
               if(com.ns==2) {
                  codons[0] = CODONs[com.z[0][hp]]);
                  codons[1] = CODONs[com.z[1][hp]]);
                  ndiff=difcodonNG(codons[0], codons[1], &St,&Nt,&ns,&na,0,com.icode);
                  fprintf(fout,"\t%3s %3s %2.0f diff (ps pn: %5.3f %5.3f)", codons[0], codons[1], ndiff, ns/St, na/Nt);
               }
               */
               FPN(fout); FPN(frst);
            }
         }
         FPN(fout);
         /* NOTE(review): !BayesEB==1 parses as (!BayesEB)==1, i.e. BayesEB==0;
            equivalent to BayesEB!=1 only if BayesEB is always 0 or 1 -- confirm. */
         if(!BayesEB==1 && com.rK[ncat-2]>wpos)
            fputs("\nNote: more than one w>1. Check rst for details\n",fout);
      }
   }
   return(0);
}
int lfunNSsites_rate (FILE* frst, double x[], int np)
{
/* This calculates the dN/dS rates for sites under models with variable dN/dS
   ratios among sites (Nielsen and Yang 1998).  Modified from lfundG()
   com.fhK[] holds the posterior probabilities.
   Also prints NEB site tables, optional RasMol coloring commands, and then
   dispatches to the BEB routines for the relevant models.
*/
   int h,hp, ir, it=0, refsp=0, k=com.ntime+com.nrgene+com.nkappa;
   double lnL=0, fh;
   double w2=x[com.np-1],psel=0, *meanw, maxmw, minmw, wpos=1.1, cutoff=0.5;
   char *sig, aa;
   FILE *fsites, *fras;
   int continuous=0, R,G,B;
   int lst=(com.readpattern?com.npatt:com.ls);
   int ncolors=5; /* continuous = 0 uses the specified colors */
   char sitelabel[96], *colors[5]={"darkblue", "lightblue", "purple", "pinkred", "red"};
   char *colorvalues[5]={"[2,2,120]", "[133,57,240]", "[186,60,200]", "[200,60,160]", "[250,5,5]"};

   if(com.nparK) error2("lfunNSsites_rate to be done for HMM.");
   if((meanw=(double*)malloc(com.npatt*sizeof(double)))==NULL)
      error2("oom lfunNSsites_rate"); /* meanw useful for NSsites only */
   if(com.aaDist==0)
      printParametersNSsites(frst,x);
   else
      fputs("\nCheck main result file for parameter estimates\n", frst);
   fx_r(x, np);   /* fills com.fhK[] with per-class site terms */
   /* undo node scaling: convert scaled logs back to relative values */
   if(com.NnodeScale)
      FOR(h,com.npatt) {
         for(ir=1,fh=com.fhK[h]; ir<com.ncatG; ir++)
            if(com.fhK[ir*com.npatt+h]>fh) fh=com.fhK[ir*com.npatt+h];
         for(ir=0; ir<com.ncatG; ir++)
            com.fhK[ir*com.npatt+h]=exp(com.fhK[ir*com.npatt+h]-fh);
         lnL-=fh*com.fpatt[h];
      }
   /* convert class priors to posteriors and accumulate posterior mean w */
   for(h=0; h<com.npatt; h++) {
      for (ir=0,fh=meanw[h]=0; ir<com.ncatG; ir++) {
         fh += (com.fhK[ir*com.npatt+h]*=com.freqK[ir]); /* prior=>posterior */
         meanw[h] += com.fhK[ir*com.npatt+h]*com.rK[ir];
      }
      for (ir=0,meanw[h]/=fh; ir<com.ncatG; ir++) com.fhK[ir*com.npatt+h]/=fh;
      lnL -= com.fpatt[h]*log(fh);
   }
   fprintf(frst,"\nNaive Empirical Bayes (NEB) probabilities for %d classes",com.ncatG);
   if(com.model==0 && com.NSsites && com.NSsites!=1 && com.NSsites!=7)
      fprintf(fout,"\nNaive Empirical Bayes (NEB) analysis");
   PrintProbNSsites(frst, com.fhK, meanw, NULL, com.ncatG, refsp);
   if(com.model && com.model<=NSbranch2) { /* branch&site models */
      if(com.rK[0]>wpos || com.rK[1]>wpos) { /* positive sites for all lineages */
         fputs("\n\nPositive sites for all lineages Prob(w>1):\n",fout);
         for(h=0; h<lst; h++) {
            hp=(!com.readpattern ? com.pose[h] : h);
            aa = GetAASiteSpecies(refsp, hp);
            psel = 0;
            if(com.rK[0]>wpos) psel = com.fhK[0*com.npatt+hp];
            if(com.rK[1]>wpos) psel += com.fhK[1*com.npatt+hp];
            if(psel>cutoff) {
               sig = "";
               if(psel>.95) sig = "*";
               if(psel>.99) sig = "**";
               fprintf(fout, "%6d %c %.3f%s\n", h+1, aa, psel, sig);
            }
         }
      }
      if(w2>wpos && (com.freqK[com.ncatG-1]>1e-6)) { /* for foreground branches */
         fprintf(fout,"\nNaive Empirical Bayes (NEB) analysis (please use the BEB results.)");
         fprintf(fout,"\nPositive sites for foreground lineages Prob(w>1):\n\n");
         for(h=0; h<lst; h++) {
            hp=(!com.readpattern ? com.pose[h] : h);
            aa = GetAASiteSpecies(refsp, hp);
            psel = com.fhK[2*com.npatt+hp]+com.fhK[3*com.npatt+hp];
            if(psel>cutoff) {
               sig = "";
               if(psel>.95) sig = "*";
               if(psel>.99) sig = "**";
               fprintf(fout, "%6d %c %.3f%s\n", h+1, aa, psel, sig);
            }
         }
      }
   }
   fprintf (frst,"\n\nlnL = %12.6f\n", -lnL);
   /* RasMol script for coloring structure */
   if(com.verbose && com.model==0) {
      fsites=(FILE*)fopen("SiteNumbering.txt", "r");
      if(fsites) {
         puts("\nCollecting RasMol commands for coloring structure into RasMol.txt");
         printf("Choose color scheme (0: %d colors, 1: white->red, 2: rainbow) ",ncolors);
         scanf("%d", &continuous);
         fras = (FILE*)gfopen("RasMol.txt", "w");
         /* range of posterior mean w across site patterns */
         for(h=0,maxmw=0,minmw=99; h<com.npatt; h++) {
            if(maxmw < meanw[h]) maxmw = meanw[h];
            if(minmw > meanw[h]) minmw = meanw[h];
         }
         if(continuous == 0)
            for (it=0; it<ncolors; it++)
               printf("\t%-10s %-20s mean_w < %7.5f\n",
                  colors[it], colorvalues[it], (it+1)*(maxmw-minmw)/ncolors);
         fprintf(fras, "cartoon\nset background white\n");
         for(h=0; h<lst; h++) {
            fscanf(fsites, "%d%s", &it, sitelabel);
            if(it-1!=h) { puts("Site number wrong. Giving up."); break; }
            if(strchr(sitelabel, '?')) continue;
            hp = (!com.readpattern ? com.pose[h] : h);
            if(continuous==0) {
               /* discrete color bins across the meanw range */
               for (it=0; it<ncolors; it++)
                  if(meanw[hp]<minmw+(it+1.)*(maxmw-minmw)/ncolors+1e-9) break;
               fprintf(fras,"select %s\n\t\tcolor %s\n", sitelabel, colorvalues[it]);
            }
            else if (continuous==1) {
               /* white -> red gradient */
               it = 5+(int)(245*(meanw[hp]-minmw)/(maxmw-minmw+1e-9));
               fprintf(fras,"select %s\n\t\tcolor [250, %d, %d]\n", sitelabel, 255-it,255-it);
            }
            else {
               rainbowRGB((meanw[hp]-minmw)/(maxmw-minmw+1e-9), &R, &G, &B);
               fprintf(fras, "select %s\n\t\tcolor [%d, %d, %d]\n", sitelabel, R,G,B);
            }
         }
         fclose(fsites); fclose(fras);
      }
   }
   free(meanw);
   /* Bayes empirical Bayes follow-ups for the applicable models */
   if(com.model==0 && (com.NSsites==NSpselection || com.NSsites==NSbetaw)
      && (com.fix_omega!=1 || com.omega!=1)) /* BEB for M2 & M8 */
      lfunNSsites_M2M8(frst, x, com.np);
   if(!com.fix_omega && (com.model==2 || com.model==3) && com.NSsites==2) /* BEB for branchsite A & clade C */
      lfunNSsites_AC(frst, x, com.np);
   return (0);
}
#ifdef NSSITESBandits
void finishup(void)
{
/* Create an empty file named "finished" as a completion marker
   (presumably polled by external batch scripts -- confirm). */
   FILE *marker = gfopen("finished", "w");
   fclose(marker);
}
#endif
/*
(*) Codon models for variable dN/dS ratios among sites
(com.nrate includes kappa & omega) (see also CDFdN_dS)
NSsites npara
0 one-ratio 0: one ratio for all sites
1 neutral 1: p0 (w0=0, w1=1)
2 selection 3: p0, p1, w2 (w0=0, w1=1)
3 discrete 2K-1: p0,p1,..., and w0,w1,...
4 freqs K: p's (w's are fixed)
5 gamma 2: alpha, beta
6 2gamma 4: p0, alpha1,beta1, alpha2=beta2
7 beta 2: p_beta, q_beta
8 beta&w 4: p0, p_beta, q_beta, w estimated
9 beta&gamma 5: p0, p_beta, q_beta, alpha, beta
10 beta&1+gamma 5: p0, p_beta, q_beta, alpha, beta (1+gamma used)
11 beta&1>normal 5: p0, p_beta, q_beta, mu, s (normal truncated w>1)
12 0&2normal 5: p0, p1, mu2, s1, s2
13 3normal 6: p0, p1, mu2, s0, s1, s2
14 M8a:beta&w=1 3: p0, p_beta, q_beta, w=1 fixed
15 M8a:beta&w>=1 4: p0, p_beta, q_beta, w>=1 estimated
NSsites = 14 forces change to fix_omega so we can't have 2 models in one run.
NSsites = 15 would not set omegab[0] correctly for the next tree.
(*) Codon models for variable dN/dS ratios among both branches and sites
(model=2, NSsites=3 or 2)
(com.nrate includes kappa & omega)
Parameters include branchlens, kappa, p0, p1, w0, w1, w2
method = 0: SetPSiteClass copies w's to nodes[].omega and PMat is calculated
in ConditionalPNode().
method = 1: PMat for branch of interest is calculated in lfuntdd_SiteClass().
The two types of branches have different Qfactor_NS: Qfactor_NS_branch[2].
August 2000.
(*) Codon (perhaps aa as well) site-class models
NSsites=3, ncatG=3 or 2 etc
aaDist:
1-6 for G1974,Miyata,c,p,v,a
FIT1 & FIT2 (11, 12): fitness model F_j = a_p*(p-p*)^2+a_v*(v-v*)^2
FIT1: w_ij = exp(F_j - F_i)
FIT2: w_ij = b*exp(F_j - F_i)
FIT1 & FIT2 are also implemented for NSsites=0
(*) Amino acid models
REVaa: The symmetrical part (S) of the rate matrix Q=S*PI are estimated,
making up 19*20/2-1=189 rate parameters for the matrix. The aa
frequencies are estimated using the observed ones. The Sij for
ijAAref=19*naa+9 (I-V) is set to one and others are relative rates;
REVaa_0: AA1STEP[i*(i+1)+j] marks the aa pair i & j that are
interchangeable. Sij for ijAAref=19*naa+9 (I-V) is set to one
and others are relative rates;
(*)
Codon & amino acid models
AAClasses: OmegaAA[i*(i-1)/2+j] marks the dN/dS ratio class for the pair
i & j. Note kappa is before omega's in x[].
OmegaAA[i*(i-1)/2+j]=-1, if AAs i & j are not interchangeable
=0, for the background ratio
=1,...,nclass for AAs i & j specified in OmegaAA.dat.
The total number of classes (com.nOmega) is one plus the number
specified in the file OmegaAAf.
com.nOmega is the number of different dN/dS ratios in the NSbranchB, NSbranch2 models
and in AAClasses.
nodes[].label marks the dN/dS ratio for the node in the NSbranchB NSbranch2 models
AA1STEP[i*(i-1)/2+j] =1 if AAs i & j differ at one codon position;
=0 otherwise.
(*) Codon and amino acid models
aaDist = -5,-4,-3,-2,-1,1,2,3,4,5:
Geometric and linear relationships between amino acid distance and
substitution rate:
wij = a*(1-b*dij/dmax)
wij = a*exp(-b*dij/dmax)
aaDist = 0:equal, +:geometric; -:linear, {1-5:Grantham,Miyata,c,p,v}
aaDist = 11, 12: fitness models, see above.
*/
#if 0 /* routines for testing codon-models */
int GetCategoryQc (char z[NS])
{
/* Returns the category ID (0 .. ncat-1, i.e. 0..18) for one codon site
   pattern z[], where z[j] is the coded codon (0..60) for species j.
   The pattern is classified by the number of nucleotide differences at each
   codon position: nbase[i] = (#distinct bases at position i) - 1.
   If the middle position shows 2 or more changes the site falls into the
   catch-all category ncat-1; otherwise positions 1 and 3 are capped at 2
   and the category is nbase[1]*9 + nbase[0]*3 + nbase[2].
   Does not work with missing data.
*/
int i,j, icat, ncat=19, it, b[NS][3], nbase[3], markb[4];
puts("\nDo not work with missing data, GetCategoryQc.");
/* decode each species' codon into its three nucleotides b[j][0..2] */
for (j=0; j<com.ns; j++) {
it=FROM61[(int)z[j]]; b[j][0]=it/16; b[j][1]=(it/4)%4; b[j][2]=it%4;
}
/* count distinct bases observed at each codon position across species */
FOR (i,3) {
FOR (j,4) markb[j]=0;
FOR (j,com.ns) markb[b[j][i]]=1;
nbase[i]=markb[0]+markb[1]+markb[2]+markb[3]-1;
}
if(nbase[1]>=2) icat=ncat-1; /* >=2 changes at the middle position */
else {
if(nbase[0]>2) nbase[0]=2; if(nbase[2]>2) nbase[2]=2;
icat = nbase[1]*9+nbase[0]*3+nbase[2];
}
return (icat);
}
int TestModelQc (FILE * fout, double x[])
{
/* Tests the Qc codon model by brute force, slower than simulation.
   Observed counts of the 19 site-pattern categories (GetCategoryQc) are
   compared with expected probabilities obtained by summing the likelihood
   over every possible tip pattern and every combination of internal-node
   states.  Restricted to ns<=5, one gene, no gamma rates, no missing data.
   fout: file receiving the observed/expected category table.
   x[]:  parameter vector, passed to SetParameters().
   Always returns 0.
*/
char z[NS];
int h, npatt, it, icat, j, nodeb[NS], imposs;
int n=Nsensecodon, isum, nsum, ncat=19;
double fh, y, nobs[19], pexp[19], Pt[8][NCODE*NCODE];
puts("\nDo not work with missing data, GetCategoryQc.");
puts("\ntest Qc..\n");
/* observed category counts from the real data patterns */
for (h=0,zero(nobs,ncat); h<com.npatt; h++) {
for (j=0; j<com.ns; j++) z[j]=com.z[j][h]-1;
icat = GetCategoryQc(z);
nobs[icat]+=com.fpatt[h];
}
FOR (j,ncat)
printf("cat #%4d: %4d%4d%4d%6.0f\n", j+1,j/9+1,(j/3)%3+1,j%3+1,nobs[j]);
if (com.ns>5 || com.alpha || com.ngene>1)
error2 ("TestModelQc: ns>5 || alpha>0.");
if (SetParameters (x)) puts ("\npar err..");
/* npatt = n^ns possible tip patterns; nsum = n^(#internal nodes) */
for (j=0,npatt=1; j<com.ns; j++) npatt*=n;
for (isum=0,nsum=1; isum<tree.nnode-com.ns; nsum*=n,isum++) ;
printf("\nTest Qc: npatt = %d\n", npatt);
/* transition-probability matrix for each branch */
FOR (j, tree.nbranch)
PMatUVRoot (Pt[j], nodes[tree.branches[j][1]].branch,n,U,V,Root);
for (h=0,zero(pexp,ncat); h<npatt; h++) {
/* decode pattern index h into tip states nodeb[0..ns-1] */
for (j=0,it=h; j<com.ns; nodeb[com.ns-1-j]=it%n,it/=n,j++) ;
for (j=0,imposs=0; j<com.ns; j++)
{ z[j]=nodeb[j]; if (com.pi[(int)z[j]]==0) imposs=1; }
if (imposs) continue;
if ((icat=GetCategoryQc(z)) == ncat-1) continue;
if ((h+1)%100==0)
printf("\rTest Qc:%9d%4d%9.2f%%", h+1, icat, 100.*(h+1.)/npatt);
/* fh = P(pattern h), summed over all internal-node state combinations */
for (isum=0,fh=0; isum<nsum; isum++) {
for (j=0,it=isum; j<tree.nbranch-com.ns+1; j++)
{ nodeb[com.ns+j]=it%n; it/=n; }
for (j=0,y=com.pi[nodeb[tree.root]]; j<tree.nbranch; j++)
y*=Pt[j][nodeb[tree.branches[j][0]]*n+nodeb[tree.branches[j][1]]];
fh += y;
}
if (fh<=0) {
matout (F0, x, 1, com.np);
printf ("\a\ntest Qc: h=%4d fh=%9.4f \n", h, fh);
}
pexp[icat]+=fh;
}
/* the skipped catch-all category absorbs the remaining probability */
pexp[ncat-1]=1-sum(pexp,ncat-1);
FOR (j,ncat)
fprintf(fout, "\ncat # %4d%4d%4d%4d%6.0f%10.5f%10.2f",
j+1, j/9+1, (j/3)%3+1, j%3+1, nobs[j], pexp[j], com.ls*pexp[j]);
return (0);
}
#endif
#if (DSDN_MC || DSDN_MC_SITES)
void SimulateData2s61(void)
{
/* This simulates two codon sequences and analyzes them using ML (GY94).
   It generates the exact site-pattern probabilities Efij[] and then samples
   from them to generate the sequence data.  Codons are coded as 0,1,...,60.
   There is another routine of a similar name in the file dsdn.c where the
   codons are coded as 0,1,...,63.  The two routines should not be mixed.
   Note that com.pi[] is changed in the analysis but not reused to
   calculate Efij[].
   Ifdef (DSDN_MC_SITES), the data will be simulated with the NSsites models
   but analysed assuming one omega for all sites, so the model is wrong.
   Input comes from in.codon2s (or in.codon2sSites): replicate count,
   codon-frequency model, sequence lengths, codon frequencies, then one or
   more parameter sets (t0, kappa0, and omega0 or a site-class mixture);
   a negative t0 terminates.  A length of 0 in the list requests ML fits to
   the exact pattern probabilities ("infinite data").  Calls exit(0).
*/
char infile[32]="in.codon2s", seqfile[32]="codonseq.tmp",str[4]="";
FILE *fseq, *fin;
int ir,nr=100, ip, i,j,k,h, n=Nsensecodon;
int npatt0=n*(n+1)/2, nobs[NCODE*NCODE];
int il,nil, ls[50]={0,200,500};
double y, x[6]={1,1,1},xb[6][2], S,dN,dS,dNt,dSt,om,lnL, mr=0;
double t0,kappa0,omega0=.5,pi0[NCODE], mx[6],vx[6],mse[6]; /* t,k,w,dS,dN */
double Efij[NCODE*(NCODE+1)/2], space[50000];
com.icode=0; com.seqtype=1; com.ns=2;
com.ncode=n; com.cleandata=1; setmark_61_64 ();
for(j=0; j<com.ns; j++)
com.z[j] = (char*) malloc(npatt0*sizeof(char));
if(com.z[com.ns-1]==NULL) error2("oom z");
if((com.fpatt=(double*)malloc(npatt0*sizeof(double)))==NULL)
error2("oom fpatt");
for(j=0; j<3; j++) { xb[j][0]=.0001; xb[j][1]=99; }
#if (DSDN_MC_SITES)
strcpy(infile,"in.codon2sSites");
#endif
printf("\nTwo codon seq. simulation for ML (GY94), input from %s\n",infile);
fin=gfopen(infile,"r");
fscanf (fin,"%d%d%d%d", &k,&nr, &com.codonf, &nil);
SetSeed(-1, 0);
printf("\n%d replicates, %s model for analysis\nLc:",
nr, codonfreqs[com.codonf]);
for(il=0; il<nil; il++)
fscanf(fin, "%d", &ls[il+1]);
matIout(F0, ls+1, 1, nil);
/* read the 64 codon frequencies; stop codons must have frequency 0 */
for(i=0,k=0; i<NCODE; i++) {
fscanf(fin,"%lf",&y);
if(GeneticCode[com.icode][i]>-1) pi0[k++]=y;
else if(y!=0)
error2("stop codon freq !=0");
}
printf("sum pi = 1 = %.6f\n", sum(pi0,n));
/* loop over parameter sets until a negative t0 terminates the run */
for(ip=0; ip<99; ip++) {
fscanf(fin, "%lf%lf", &t0,&kappa0);
if(t0<0) exit(0);
printf("\n\nParameter set %d\nt0 =%.2f kappa0 =%.2f\n",ip+1,t0,kappa0);
fprintf(frst,"\n\nParameter set %d\nt0 =%.2f kappa0 =%.2f\n",ip+1,t0,kappa0);
for(j=0; j<n; j++) com.pi[j] = pi0[j];
com.ls=1;
#if (DSDN_MC_SITES)
/* read the site-class mixture and compute the true dS, dN under it */
com.NSsites=3;
fscanf(fin,"%d", &com.ncatG);
for(i=0; i<com.ncatG; i++) fscanf(fin,"%lf", &com.freqK[i]);
for(i=0; i<com.ncatG; i++) fscanf(fin,"%lf", &com.rK[i]);
printf("\nSite classe model (K=%d)\np: ",com.ncatG);
for(i=0; i<com.ncatG; i++)
printf("%7.4f",com.freqK[i]);
printf("\nw: "); FOR(i,com.ncatG) printf("%7.4f",com.rK[i]); FPN(F0);
fprintf(frst,"\nSite classe model (K=%d)\np: ",com.ncatG);
for(i=0; i<com.ncatG; i++)
fprintf(frst,"%7.4f",com.freqK[i]);
fputs("\nw: ",frst); FOR(i,com.ncatG) fprintf(frst,"%7.4f",com.rK[i]); FPN(frst);
if(1-sum(com.freqK,com.ncatG))
error2("freqs do not sum to 1");
for(j=0,Qfactor_NS=0,dS=dN=0; j<com.ncatG; j++) {
freqK_NS = com.freqK[j];
EigenQcodon(2,1,&S,&dSt,&dNt,NULL,NULL,NULL, &mr, &kappa0,com.rK[j],PMat);
dS += freqK_NS*dSt;
dN += freqK_NS*dNt;
}
Qfactor_NS = 1/Qfactor_NS;
om = (dS>0?dN/dS:-1);
dS *= t0*Qfactor_NS;
dN *= t0*Qfactor_NS;
#else
fscanf(fin,"%lf", &omega0);
EigenQcodon(2,t0,&S,&dS,&dN, NULL,NULL,NULL, &mr, &kappa0,omega0,space);
om=omega0;
#endif
printf("\nCorrect values");
printf("\nS%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,om);
fprintf(frst,"\nCorrect values");
fprintf(frst,"\nS%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,om);
/* calculate Efij[], the site pattern probabilities */
FOR(j,n) com.pi[j]=pi0[j];
#if (DSDN_MC_SITES)
com.NSsites=3;
for(k=0,zero(Efij,npatt0); k<com.ncatG; k++) {
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, &kappa0,com.rK[k],PMat);
PMatUVRoot(PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) {
y=com.pi[i]*PMat[i*n+j];
Efij[h++] += (i==j?y:2*y) * com.freqK[k];
}
}
com.NSsites=0;
#else
EigenQcodon(1,-1,NULL,NULL,NULL,Root, U, V, &mr, &kappa0, omega0, PMat);
PMatUVRoot (PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) { /* why for each il? */
y=com.pi[i]*PMat[i*n+j];
Efij[h++]=(i==j?y:2*y);
}
#endif
/* store the full pattern set: pattern (i,j) with probability Efij[h] */
for(i=h=0,com.ls=1,com.npatt=npatt0;i<n;i++) for(j=0;j<=i;j++) {
com.z[0][h]=(char)i; com.z[1][h]=(char)j;
com.fpatt[h]=Efij[h]; h++;
}
if(fabs(1-sum(Efij,npatt0))>1e-6) error2("sum Efij != 1");
for(il=0; il<nil+1; il++) {
com.ls=ls[il];
if(com.ls==0) {
/* ls==0: fit the model directly to the exact pattern probabilities */
puts("\nML estimates from infinite data");
com.ls=1;
x[0]=t0*rndu(); x[1]=kappa0; x[2]=omega0*rndu();
GetCodonFreqs2 ();
ming2(NULL,&lnL,lfun2dSdN,NULL,x,xb, space,1e-10,3);
printf("lnL = %.6f\n",-lnL);
EigenQcodon(2,x[0],&S,&dS,&dN, NULL,NULL,NULL, &mr, &x[1],x[2],space);
printf("S%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,x[2]);
fprintf(frst,"ML estimates from infinite data\nt=%7.4f k=%7.4f",x[0],x[1]);
fprintf(frst," S%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,x[2]);
/* turn Efij[] into cumulative probabilities for sampling below */
for(h=1;h<npatt0; h++) Efij[h]+=Efij[h-1];
puts("\nt & k & w & dS & dN");
fputs("\nLc & t & k & w & dS & dN\n",frst); fflush(frst);
continue;
}
printf("\nls = %d\n", com.ls);
/* nr replicates of length com.ls, each analysed by ML */
for(ir=0,zero(mx,6),zero(vx,6),zero(mse,6); ir<nr; ir++) {
MultiNomial(com.ls, npatt0, Efij, nobs, NULL);
/* collapse sampled counts into observed patterns and frequencies */
for(i=0,com.npatt=0,zero(com.pi,n);i<n;i++) for(j=0;j<=i;j++)
if(nobs[k=i*(i+1)/2+j]) {
com.z[0][com.npatt]=i; com.z[1][com.npatt]=j;
com.fpatt[com.npatt++]=nobs[k];
}
for(i=0,zero(com.pi,n); i<com.npatt; i++) {
com.pi[com.z[0][i]]+=com.fpatt[i]/(2.*com.ls);
com.pi[com.z[1][i]]+=com.fpatt[i]/(2.*com.ls);
}
GetCodonFreqs2 ();
x[0]=t0; x[1]=kappa0; x[2]=omega0;
/* printf("\nlnL=%9.6f\n",-lfun2dSdN(x,3)); */
ming2((noisy?F0:NULL),&lnL,lfun2dSdN,NULL,x,xb, space,1e-7,3);
EigenQcodon(2,x[0],&S,&x[3],&x[4], NULL,NULL,NULL, &mr, &x[1],x[2],space);
/* running mean mx[] and sum of squared deviations vx[] */
FOR(j,5) {
vx[j] += (x[j]-mx[j])*(x[j]-mx[j]);
mx[j] = (mx[j]*ir+x[j])/(ir+1.);
}
mse[0]+=square(x[2]-omega0);
printf("\r%4d%8.4f%8.4f%8.4f %8.4f%8.4f%8.4f%8.4f%8.4f",
ir+1,x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
#if 0
if(ir==9) {
fseq=gfopen(seqfile,"w");
fprintf(fseq,"%6d %6d\n", com.ns,com.ls*3);
for(i=0;i<2;i++,FPN(fseq),fflush(fseq)) {
fprintf(fseq,"seq.%-5d ", i+1);
FOR(h,com.npatt) FOR(k,(int)com.fpatt[h])
fprintf(fseq,"%s", getcodon(str,FROM61[com.z[i][h]]));
}
fclose(fseq); exit(0);
}
#endif
}
if(nr>1) { FOR(j,5) vx[j]=sqrt(vx[j]/(nr-1.)/nr); mse[0]=sqrt(mse[0]/nr); }
fprintf(frst,"%4d ", com.ls);
FOR(i,5) fprintf(frst,"%7.4f +%7.4f", mx[i],vx[i]); FPN(frst);
} /* for (ii) */
} /* for(ip) */
exit(0);
}
void Ina(void)
{
/* This simulates two codon sequences and analyzes them using Ina's method.
   Ina's program is modified to output result in Ina1.tmp.  Consistency
   analysis is done by generating long sequences.
   Note that com.pi[] is not changed in the analysis, which is done outside
   the program: each replicate is written to codonseq.tmp and the external
   program Ina1 (or Ina1Large for long sequences) is run via system(), and
   its estimates (t, k, w) are read back from Ina1.tmp.
   Codon frequencies come from one of the hard-coded F3x4 tables chosen
   interactively (fcodon).  A negative t0 terminates; calls exit(0).
   Fix: x[] must have 6 elements -- the initialization loop zeroes x[0..5]
   and the running-mean loop below reads x[0..4]; the previous x[3]
   declaration caused out-of-bounds reads/writes.
*/
char seqfile[32]="codonseq.tmp",tmpfile[32]="Ina1.tmp",str[4]="";
FILE *fseq, *ftmp;
int ip,ir,nr=500, i,j,k,h, n=Nsensecodon;
int npatt0=n*(n+1)/2, nobs[NCODE*NCODE];
int il,nil=1, ls[]={500,100,200,300,400,500,600,800,1000}, fcodon=1;
double y, f3x4[12], x[6]={1,1,1}, S,dS,dN, mr=0;
double t0=1,kappa0=1,omega0=1, mx[6],vx[6],mse[6]; /* t,k,w,dS,dN */
double Efij[NCODE*NCODE], space[50000];
double f3x4_data[][3*4]={
{0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25},
{0.20517, 0.28293, 0.30784, 0.20406, /* mt pri */
0.40979, 0.27911, 0.18995, 0.12116,
0.15105, 0.43290, 0.37123, 0.04482},
{0.19020, 0.16201, 0.36655, 0.28124, /* hiv */
0.28889, 0.18805, 0.30179, 0.22127,
0.24875, 0.16894, 0.36822, 0.21410},
{0.14568, 0.24519, 0.33827, 0.27086,
0.35556, 0.18765, 0.24049, 0.21630,
0.26444, 0.25728, 0.21012, 0.26815} /* lysinNew*/
};
puts("\nThis simulates data and analyses them using Ina95.");
printf ("fcodon? ");
scanf ("%d", &fcodon);
FOR(j,12) f3x4[j]=f3x4_data[fcodon][j];
/* codon frequencies from the F3x4 table, excluding stop codons,
   renormalized to sum to 1 */
for(j=0,h=0,y=0; j<64; j++) {
if (GeneticCode[com.icode][j]==-1) continue;
com.pi[h]=f3x4[j/16]*f3x4[4+(j%16)/4]*f3x4[8+j%4];
y+=com.pi[h++];
}
FOR(j,n) com.pi[j]/=y;
printf("fcodon: %d\n",fcodon);
matout(frst,f3x4,3,4);
com.icode=0; com.seqtype=1; com.ns=2; com.ls=1; npatt0=n*(n+1)/2;
com.ncode=n; setmark_61_64 ();
FOR(j,com.ns) com.z[j]=(char*) malloc(npatt0*sizeof(char));
if(com.z[com.ns-1]==NULL) error2 ("oom z");
if((com.fpatt=(double*)malloc(npatt0*sizeof(double)))==NULL)
error2("oom fpatt");
printf("\nInfinite sequences.\nsum pi=1=%.6f\n",sum(com.pi,NCODE));
noisy=0; FOR(i,6) x[i]=0;
/* loop over parameter sets until a negative t0 terminates the run */
FOR(ip,99) {
printf("\nt0 & kappa0 & omega0? ");
scanf("%lf%lf%lf", &t0,&kappa0,&omega0);
if(t0<0) exit(0);
printf("t0 =%.2f & kappa0 =%.2f & omega0 =%.2f\n",t0,kappa0,omega0);
fprintf(frst, "\nt & k & w: %8.2f%8.2f%8.2f\n\n", t0,kappa0,omega0);
EigenQcodon(2,t0,&S,&dS,&dN, NULL,NULL,NULL, &mr, &kappa0,omega0,space);
fprintf(frst,"\nS/(S+N)=%7.4f dS=%7.4f dN=%7.4f\n",S/3,dS,dN);
fputs("Lc & t & k & w & dS & dN\n",frst);
/* exact site-pattern probabilities Efij[] under the true parameters */
EigenQcodon(1,-1,NULL,NULL,NULL,Root, U, V, &mr, &kappa0, omega0, PMat);
PMatUVRoot (PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) {
y=com.pi[i]*PMat[i*n+j];
Efij[h++]=(i==j?y:2*y);
}
for(i=h=0,com.ls=1,com.npatt=npatt0;i<n;i++) for(j=0;j<=i;j++) {
com.z[0][h]=(char)i; com.z[1][h]=(char)j;
com.fpatt[h]=Efij[h]; h++;
}
/* cumulative probabilities for multinomial sampling */
for(h=1;h<npatt0; h++) Efij[h]+=Efij[h-1];
if(fabs(1-Efij[npatt0-1])>1e-6) puts("Sum p_ij != 1.");
for(il=0; il<nil; il++) {
com.ls=ls[il];
printf("\nls = %d\n", com.ls);
for(ir=0,zero(mx,6),zero(vx,6),zero(mse,6); ir<nr; ir++) {
printf("\r%4d", ir+1);
MultiNomial (com.ls, npatt0, Efij, nobs, NULL);
for(i=0,com.npatt=0;i<n;i++) for(j=0;j<=i;j++)
if(nobs[k=i*(i+1)/2+j]) {
com.z[0][com.npatt]=i; com.z[1][com.npatt]=j;
com.fpatt[com.npatt++]=nobs[k];
}
/* write the replicate to codonseq.tmp and run Ina's program on it */
fseq=gfopen(seqfile,"w");
fprintf(fseq,"> %6d %6d\n", com.ns,com.ls*3);
for(i=0;i<2;i++,FPN(fseq),fflush(fseq)) {
fprintf(fseq,"seq.%-5d ", i+1);
FOR(h,com.npatt) FOR(k,(int)com.fpatt[h])
fprintf(fseq,"%s", getcodon(str,FROM61[com.z[i][h]]));
}
fclose(fseq);
if(com.ls>2000) system("Ina1Large codonseq.tmp >t");
else system("Ina1 codonseq.tmp >t");
/* read back Ina's three estimates */
ftmp=gfopen(tmpfile,"r");
if(fscanf(ftmp,"%lf%lf%lf",&x[0],&x[1],&x[2]) !=3)
error2("reading tmpf");
fclose(ftmp);
/* running mean mx[] and sum of squared deviations vx[] */
FOR(j,5) {
vx[j] += (x[j]-mx[j])*(x[j]-mx[j]);
mx[j] = (mx[j]*ir+x[j])/(ir+1.);
}
mse[0]+=square(x[0]-omega0);
printf("%7.4f%7.4f%7.4f %7.4f%7.4f%7.4f%7.4f%7.4f",
x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
/* fprintf(frst1,"%7.4f%7.4f%7.4f %7.4f%7.4f%7.4f%7.4f%7.4f\n",
x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
*/
}
if(nr>1) { FOR(j,5) vx[j]=sqrt(vx[j]/(nr-1.)/nr); mse[0]=sqrt(mse[0]/nr); }
fprintf(frst,"%4d ", com.ls);
FOR(i,5) fprintf(frst,"%7.3f +%7.4f", mx[i],vx[i]);
FPN(frst); fflush(frst);
fprintf(frst1,"%6d%6d %7.2f%7.2f%7.2f: %8.3f +%7.3f\n",
com.ls,nr, t0,kappa0,omega0, mx[0],mse[0]);
fflush(frst1);
} /* for (il) */
} /* for (ip) */
exit(0);
}
#endif
#if 0
int mergeSeqs(FILE*fout)
{
/* This concatenates multiple genes (data sets) for the same set of species
   into one file of a long gene.  Used to process Anne Yoders' alignment.
   All 12 input files must contain the same ns0=32 species in the same
   order (checked against spname0[]).  The merged alignment is written to
   fout with a G option line listing the per-gene lengths: lgene0[] holds
   gene lengths in codons (com.ls as returned by ReadSeq), while ls0
   accumulates the total in nucleotides (com.ls*3).
   Always returns 0.
*/
char *filenames[12]={"NADH1.fin","NADH2.fin","COI.fin","COII.fin","ATPase8.fin",
"ATPase6.fin","COIII.fin","NADH3.fin","NADH4L.fin","NADH4.fin",
"NADH5.fin", "Cytb.fin"};
int ns0=32, nfile=12, ifile, ls0, lswhole=20000, i,h, lgene0[32];
char *z0[32], *spname0[32]={"Artibeus", "B.musculus", "B.physalus", "Bos",
"Canis", "Cavia", "Ceratother", "Dasypus", "Didelphis", "E.asinus",
"E.caballus","Erinaceus", "Felis", "Glis", "Gorilla", "Halichoeru", "Homo",
"Hylobates", "Macropus", "Mus", "Ornithorhy", "Oryctolagu", "Ovis",
"P.paniscus", "P.troglody", "Papio", "Phoca", "P.abelii",
"P.pygmaeus", "Rattus", "Rhinoceros", "Sus"};
FILE *fseq;
noisy=0;
FOR(i,ns0) if((z0[i]=(char*)malloc(lswhole*sizeof(char)))==NULL)
error2("oom z");
/* read each gene file and append its sites to the merged arrays z0[] */
for(ifile=0,ls0=0; ifile<nfile; ifile++) {
printf("Reading data set %2d/%2d (%s)", ifile+1,nfile,filenames[ifile]);
fseq=gfopen (filenames[ifile],"r");
ReadSeq(NULL,fseq,1);
lgene0[ifile]=com.ls; com.ls*=3; /* com.ls in codons -> nucleotides */
FOR(i,ns0) if(strcmp(spname0[i],com.spname[i])) error2("spname different");
FOR(i,ns0) FOR(h,com.ls) z0[i][ls0+h]=com.z[i][h];
ls0+=com.ls;
printf(" + %5d = %5d\n", com.ls, ls0);
}
/* write header with the G option line, then one sequence per species,
   with a blank after every codon */
fprintf(fout,"%6d %6d G\nG %4d ", ns0,ls0,nfile);
FOR(ifile,nfile) fprintf(fout, " %4d", lgene0[ifile]); FPN(fout);
for(i=0;i<ns0;i++,FPN(fout)) {
fprintf(fout,"%-12s ", spname0[i]);
FOR(h,ls0) {
fprintf(fout,"%c", z0[i][h]);
if((h+1)%3==0) fprintf(fout," ");
}
}
return(0);
}
#endif
int SlidingWindow(FILE*fout, FILE* fpair[], double space[])
{
/* Sliding-window scan for positive selection; clean data, 2 sequences only
   (runmode=-2, one gene).  For each window of windowsize0 codons, advanced
   by offset0, the window's site patterns are assembled and PairwiseCodon()
   is run twice: with omega fixed at 1 (null) and with omega free
   (alternative).  If the free estimate has omega>1 and 2*(lnL1-lnL0)
   exceeds 2.71 (the chi2, df=1, 10%-level critical value), the window is
   flagged positive and the scan stops early.
   Returns 1 if a positive window was found, 0 otherwise.  The full data
   set (com.ls, com.npatt, com.fpatt, com.pi, com.z) is restored before
   returning.  Note com.fix_omega/com.omega are left as set by the last
   window analysed.
*/
int wlen=windowsize0, offset=offset0, wstart, n=com.ncode, j, h, positive=0;
int ls0=com.ls, npatt0=com.npatt;
char *z0[NS];
double *fpatt0, pi0[NCODE], lnL0=0, lnL1=0;
if(com.seqtype!=1) error2("implemented for codon sequences only.");
if(com.runmode!=-2) error2("this version of sliding windows requires runmode=-2");
if(!com.cleandata || com.ngene>1)
error2("clean data & one gene only for sliding window analysis");
if(com.print)
error2("Choose RateAncestor=0 for sliding window analysis");
/* save the full data set so it can be restored on return */
for(j=0; j<com.ns; j++)
z0[j] = com.z[j];
for(j=0; j<com.ns; j++)
if((com.z[j]=malloc(npatt0*sizeof(char)))==NULL) error2("oom z");
if((fpatt0=(double*)malloc(npatt0*sizeof(double)))==NULL) error2("oom fp");
for(h=0; h<com.npatt; h++)
fpatt0[h] = com.fpatt[h];
for(j=0; j<n; j++)
pi0[j] = com.pi[j];
for (wstart=0; wstart+wlen<=ls0; wstart+=offset) {
/* count pattern occurrences inside the current window, then compact
   the used patterns to the front of com.z[]/com.fpatt[] */
for(h=0; h<npatt0; h++)
com.fpatt[h] = 0;
for(h=wstart; h<wstart+wlen; h++)
com.fpatt[com.pose[h]]++;
for(h=0,com.npatt=0,zero(com.pi,n); h<npatt0;h++) if(com.fpatt[h]>0) {
for(j=0; j<com.ns; j++)
com.z[j][com.npatt] = z0[j][h];
com.fpatt[com.npatt] = com.fpatt[h];
com.npatt++;
}
com.ls = wlen;
com.posG[0] = 0; com.posG[1] = com.npatt;
/* null model: omega fixed at 1 */
com.fix_omega = 1; com.omega = 1;
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
lnL0 = -lnLmodel; /* lnLmodel passed overhead from PairwiseCodon() */
/* alternative model: omega estimated */
com.fix_omega = 0; com.omega = 0.5;
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
lnL1 = -lnLmodel;
if(com.omega>1 && (lnL1-lnL0)>2.71/2) {
positive = 1;
break;
}
if(noisy)
printf("sites %3d -- %3d (%d) npatt:%4d w=%.4f\n",wstart+1,wstart+wlen,ls0,com.npatt, com.omega);
fprintf(fout,"\nsites %3d -- %3d %4d",wstart+1,wstart+wlen,com.npatt);
/* Forestry(fout); */
}
fprintf(frst1, " %2d", positive);
printf(" %2d", positive);
/* restore the full data set */
com.ls = ls0; com.posG[1] = com.npatt = npatt0;
for(h=0; h<com.npatt; h++)
com.fpatt[h] = fpatt0[h];
xtoy(pi0, com.pi, n);
free(fpatt0);
for(j=0; j<com.ns; j++) {
free(com.z[j]);
com.z[j] = z0[j];
}
return(positive);
}
void Get4foldSites(void)
{
/* This collects the four-fold degenerate sites into a file named 4fold.nuc.
   The data are not coded yet, and the routine is called from ReadSeq().
   A codon site is collected iff
     (i)  in the reference sequence (species 0) the first two codon
          positions are each unambiguous (one base) and form a four-fold
          degenerate codon family (FourFold[][]); and
     (ii) every other species has the same first two bases, so the family
          is conserved across the alignment.
   The third positions of the ls4 collected codons are written to 4fold.nuc,
   followed by the list of included codon numbers.
*/
int ls4, j,k,h, ib[3][4], nb[3];
char file4[12]="4fold.nuc", *mark4;
FILE *f4;
f4=gfopen(file4,"w");
if ((mark4=(char*)malloc(com.ls*sizeof(char)))==NULL) error2("oom mark");
FOR(h,com.ls) mark4[h]=0;
for (h=0,ls4=0; h<com.ls; h++) {
for(k=0; k<3; k++)
NucListall(com.z[0][h*3+k], &nb[k], ib[k]);
/* Positions 1 and 2 must be unambiguous: test nb[1], not nb[2] (the
   third position is allowed to vary at a four-fold degenerate site). */
if(nb[0]==1 && nb[1]==1 && FourFold[ib[0][0]][ib[1][0]]) {
for(j=1; j<com.ns; j++)
for(k=0; k<2; k++) if(com.z[j][h*3+k]!=com.z[0][h*3+k]) goto nextsite;
mark4[h]=1; ls4++;
}
nextsite: ;
} /* for(h) */
/* write the third-position alignment of the collected codons */
fprintf (f4, "%6d %6d\n", com.ns, ls4);
for (j=0; j<com.ns; j++) {
fprintf (f4, "\n%s\n", com.spname[j]);
for (h=0; h<com.ls; h++)
if(mark4[h]) fprintf (f4, "%c", com.z[j][h*3+2]);
FPN (f4);
}
fprintf(f4, "\n\ncodons included\n");
for(h=0; h<com.ls; h++)
if(mark4[h]) fprintf(f4, " %2d", h+1);
FPN(f4);
fclose(f4); free(mark4);
}
double distanceHKY85 (double x[], double *kappa, double alpha);
void d4dSdN(FILE* fout)
{
/* This looks at the four-fold degenerate sites.  For a series of divergence
   times t, it computes S, dS, dN under the codon model together with the
   proportion of four-fold degenerate sites (pS4), the HKY85 distance at
   those sites (d4) and the corresponding kappa (kappa4fold).  Output goes
   to stdout and frst.  Calls exit(0) when done.
   NOTE(review): the fout argument is not used by this routine.
*/
char str1[4]=" ", str2[4]=" ";
int i,j,k, n=com.ncode, b[2][3], ic1,ic2,iaa;
double pS4,d4,kappa4fold;
double fij, fij4f[4*4], pi4f[4], pstop,t, S,dS,dN,dN_dS, mr=0;
double fb3x4[12]={.25, .25, .25, .25,
.25, .25, .25, .25,
.25, .25, .25, .25};
int nii=18, ii;
double t0[]={0.001, 0.01,0.05, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 1.2, 1.5, 2,2.5,3};
com.ls=1; com.kappa=3; com.omega=1;
/* skewed base composition at codon positions 1 and 3 */
fb3x4[0*4+0]=0.35;
fb3x4[0*4+1]=0.15;
fb3x4[0*4+2]=0.35;
fb3x4[0*4+3]=0.15;
/*
fb3x4[1*4+0]=0.35;
fb3x4[1*4+1]=0.15;
fb3x4[1*4+2]=0.35;
fb3x4[1*4+3]=0.15;
*/
fb3x4[2*4+0]=0.35;
fb3x4[2*4+1]=0.15;
fb3x4[2*4+2]=0.35;
fb3x4[2*4+3]=0.15;
printf("\tt\tS\tdS\tdN\tdN/dS\tS4\td4\tk_4f\tpT_4f\n");
/* codon frequencies from fb3x4, renormalized to exclude stop codons */
zero(com.pi,64);
FOR(k,64) if(FROM64[k]>-1)
com.pi[FROM64[k]]=fb3x4[k/16]*fb3x4[4+(k/4)%4]*fb3x4[8+k%4];
pstop=1-sum(com.pi,n);
abyx(1/(1-pstop),com.pi,n);
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, &com.kappa,com.omega,PMat);
matout(frst,com.pi,16,4);
FOR(ii,nii) {
t=t0[ii];
EigenQcodon(2,t,&S,&dS,&dN,NULL,NULL,NULL, &mr, &com.kappa,com.omega,PMat);
PMatUVRoot (PMat, t, n, U, V, Root);
if(testTransP(PMat,n)) error2("testP");
matout(frst,PMat,n,n);
/* accumulate the 4x4 matrix fij4f[] of third-position pair frequencies,
   over codons whose codon is four-fold degenerate and whose first two
   positions are unchanged between the two codons */
for(i=0,zero(fij4f,16);i<n;i++) {
ic1=FROM61[i]; b[0][0]=ic1/16; b[0][1]=(ic1/4)%4; b[0][2]=ic1%4;
iaa=GeneticCode[com.icode][ic1];
ic1-=b[0][2];
FOR(k,4) if(GeneticCode[com.icode][ic1+k]!=iaa) break;
if(k<4) continue; /* codon family is not four-fold degenerate */
FOR(j,n) {
fij=com.pi[i]*PMat[i*n+j];
ic2=FROM61[j]; b[1][0]=ic2/16; b[1][1]=(ic2/4)%4; b[1][2]=ic2%4;
if(b[0][0]!=b[1][0] || b[0][1]!=b[1][1]) continue;
fij4f[b[0][2]*4+b[1][2]] += fij;
/* printf("%c %s %s %.8f\n",AAs[iaa],getcodon(str1,ic1+b[0][2]),getcodon(str2,ic2),fij);
*/
}
}
pS4=sum(fij4f,16)/3;
abyx(1/sum(fij4f,16),fij4f,16);
FOR(k,4) pi4f[k]=sum(fij4f+k*4,4);
/* matout(F0,fij4f,4,4); */
d4 = distanceHKY85 (fij4f, &kappa4fold, 0);
dN_dS = (dS>0 ? dN/dS : -1);
printf("\t%.4f\t%.5f\t%.5f\t%.5f\t%.5f\t%.3f\t%.5f\t%.3f\t%.4f\n",
t,S/3,dS,dN,dN_dS, pS4,d4,kappa4fold,pi4f[0]);
}
printf("\nproportion of stop codons: %.4f\n", pstop);
exit(0);
}
double distanceHKY85 (double x[], double *kappa, double alpha)
{
/* This is from SeqDivergence(), copied here to avoid linking to SeqDivergence.
   x[] is a 4x4 matrix of observed nucleotide pair frequencies; from its use
   below, bases 0,1 are treated as one group (Y=p[0]+p[1]) and 2,3 as the
   other (R=p[2]+p[3]), with P1, P2 the within-group (transition) and Q the
   between-group (transversion) frequencies.
   On return *kappa holds the estimated transition/transversion rate ratio
   (capped at largek=999; set to -1 when the sequences show no difference),
   and the return value is the corrected distance (gamma rates used when
   alpha>0).  Returns larged=9 when the log/gamma correction breaks down.
*/
int i,j;
double p[4], Y,R, a1,a2,b, P1,P2,Q,tc,ag;
double largek=999, larged=9;
if (testXMat(x) && noisy) {
matout(F0,x,4,4);
puts("X err.. Perhaps no sites to compare?");
}
*kappa=0;
/* average base frequencies of the two sequences */
for (i=0,zero(p,4); i<4; i++) {
FOR (j,4) { p[i]+=x[i*4+j]/2; p[j]+=x[i*4+j]/2; }
}
/* P1, P2: within-group differences; Q: between-group differences */
P1=x[0*4+1]+x[1*4+0];
P2=x[2*4+3]+x[3*4+2];
Q = x[0*4+2]+x[0*4+3]+x[1*4+2]+x[1*4+3]+ x[2*4+0]+x[2*4+1]+x[3*4+0]+x[3*4+1];
Y=p[0]+p[1];
R=p[2]+p[3];
if(P1+P2+Q<1e-100) {
*kappa=-1; return(0); /* identical sequences */
}
tc=p[0]*p[1];
ag=p[2]*p[3];
a1=1-Y*P1/(2*tc)-Q/(2*Y);
a2=1-R*P2/(2*ag)-Q/(2*R);
b=1-Q/(2*Y*R);
if (a1<=0 || a2<=0 || b<=0) return (larged);
/* -log correction, or gamma-rates correction when alpha>0 */
if (alpha<=0) { a1=-log(a1); a2=-log(a2); b=-log(b); }
else { a1=-gammap(a1,alpha); a2=-gammap(a2,alpha); b=-gammap(b,alpha);}
a1 = -R/Y*b + a1/Y;
a2 = -Y/R*b + a2/R;
if (b>0) *kappa = min2((a1+a2)/(2*b), largek);
return 2*(p[0]*p[1] + p[2]*p[3])*(a1+a2)/2 + 2*Y*R*b;
}
void get_pclassM_iw_M2M8(int *iw, double *pclassM,
int iclassM, int ip[], double para[4][100], int n1d, int M2a, int ternary);
void get_grid_para_like_M2M8(double para[4][100], int n1d, int dim, int M2a, int ternary,
double p0b[], double p1b[], double w0b[], double wsb[],
double p_beta_b[], double q_beta_b[], double x[], double *S);
void GetIndexTernary(int *ix, int *iy, double *x, double *y, int itriangle, int K);
void get_grid_para_like_M2M8 (double para[4][100], int n1d, int dim, int M2a, int ternary,
double p0b[], double p1b[], double w0b[], double wsb[],
double p_beta_b[], double q_beta_b[], double x[], double *S)
{
/* This sets up the grid (para[][]) according to the priors, copies all
   possible w values into com.rK[], and computes the per-pattern likelihoods
   f(x_h|w) via fx_r(), stored (scaled) in com.fhK[].
   The bounds (p0b, p1b, w0b, wsb, p_beta_b, q_beta_b) define uniform
   priors; each of the n1d bins is represented by its interval midpoint.
   The ORDER in which w values are written into com.rK[] here is an implicit
   contract with get_pclassM_iw_M2M8(), which computes indices (iw) into it.
   *S returns the accumulated log scale factors removed from com.fhK[].
*/
int i,k,h, site=10;
double fh;
if(com.NSsites==NSbetaw) /* can't control the range of w from the beta */
{ w0b[0]=0; w0b[1]=1; }
/* midpoint grid values for each parameter */
for(i=0; i<n1d; i++) {
para[0][i] = p0b[0]+(i+0.5)*(p0b[1]-p0b[0])/n1d; /* p0 */
if(com.NSsites==2) { /* M2 & M2a */
para[1][i] = p1b[0]+(i+0.5)*(p1b[1]-p1b[0])/n1d; /* p1 */
if(ternary) para[0][i] = para[1][i] = -1; /* p0,p1 come from the triangle */
if(M2a)
para[2][i] = w0b[0]+(i+0.5)*(w0b[1]-w0b[0])/n1d; /* w0 */
para[2+M2a][i] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* w2 */
}
else { /* M8 */
para[1][i] = p_beta_b[0]+(i+0.5)*(p_beta_b[1]-p_beta_b[0])/n1d; /* p */
para[2][i] = q_beta_b[0]+(i+0.5)*(q_beta_b[1]-q_beta_b[0])/n1d; /* q */
para[3][i] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* ws */
}
}
/* lay out every w value ever needed in com.rK[] */
k=0;
if(com.NSsites==2 && M2a==0)
com.rK[k++]=0;
else /* w0 in M2a or w0 from beta in M8 */
for(i=0; i<n1d; i++)
com.rK[k++] = w0b[0]+(i+0.5)*(w0b[1]-w0b[0])/n1d;
if(com.NSsites==2)
com.rK[k++]=1; /* w1 for M2 & M2a */
for(i=0; i<n1d; i++)
com.rK[k++] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* w2 in M2 or ws */
/* calculates the likelihood com.fhK[] */
printf("\nCalculating f(x_h|w): %d categories %d w sets.\n", n1d, com.ncatG);
com.conPSiteClass=0; *S=0;
fx_r(x,-1);
if(noisy>3)
for(k=0; k<com.ncatG; k++)
printf("S%d w log{f(x|w)}: %9.4f %12.6f\n",
site,com.rK[k], (com.NnodeScale?com.fhK[k*com.npatt+site]:log(com.fhK[k*com.npatt+site])));
/* rescale f(x_h|w) by its maximum over w for each pattern h, to avoid
   underflow; *S accumulates the log scale factors removed */
if(com.NnodeScale)
for(h=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] = exp(com.fhK[k*com.npatt+h]-fh);
*S += fh*com.fpatt[h];
}
else
for(h=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] /= fh;
*S += log(fh)*com.fpatt[h];
}
}
void get_pclassM_iw_M2M8(int *iw, double *pclassM,
int iclassM, int ip[], double para[][100], int n1d, int M2a, int ternary)
{
/* Maps one grid point (ip[]) and one site class (iclassM) to
     *iw      -- the index of that class's w ratio in com.rK[]/com.fhK[];
     *pclassM -- the prior proportion of sites in that class under the model.
   The iw indexing mirrors the layout of com.rK[] built in
   get_grid_para_like_M2M8(); see that routine for details.
   Under M8, class iclassM<n1d is the iclassM-th of n1d equal-width bins of
   the beta(p,q) distribution (probability p0*(CDF difference), computed
   with CDFBeta), and the final class is the selection class with weight
   1-p0.  Under M2/M2a with ternary=1, the n1d*n1d p0-p1 grid is mapped
   onto the p0-p1-p2 triangle via GetIndexTernary(), so that p0 and p1
   each take 2*n1d-1 possible values.
*/
   int ix, iy;
   double p0, p1, pbeta, qbeta, cdfLo = 0, cdfHi = 1;

   if (com.NSsites != NSpselection) {          /* M8: p0, p, q, ws */
      p0 = para[0][ip[0]];
      if (iclassM >= n1d) {                    /* extra class with w = ws */
         *pclassM = 1 - p0;
         *iw = n1d + ip[3];
         return;
      }
      /* one of the n1d equal-width bins of the beta(p,q) distribution */
      pbeta = para[1][ip[1]];
      qbeta = para[2][ip[2]];
      if (iclassM > 0)     cdfLo = CDFBeta(iclassM/(double)n1d, pbeta, qbeta, 0);
      if (iclassM < n1d-1) cdfHi = CDFBeta((iclassM+1.0)/n1d, pbeta, qbeta, 0);
      *pclassM = p0*(cdfHi - cdfLo);
      *iw = iclassM;
      return;
   }

   /* M2 & M2a: class proportion p0, p1, or p2 = 1-p0-p1 */
   if (ternary) {
      GetIndexTernary(&ix, &iy, &p0, &p1, ip[0]*n1d + ip[1], n1d);
      *pclassM = (iclassM==0 ? p0 : (iclassM==1 ? p1 : 1 - p0 - p1));
   }
   else {
      *pclassM = (iclassM < 2 ? para[iclassM][ip[iclassM]]          /* p0 or p1 */
                              : 1 - para[0][ip[0]] - para[1][ip[1]]); /* p2 */
      *pclassM = max2(*pclassM, 0);
   }
   /* location of the class's w ratio in com.rK[] */
   if (M2a) {
      if (iclassM==0)      *iw = ip[2];            /* w0 */
      else if (iclassM==1) *iw = n1d;              /* w1 = 1 */
      else                 *iw = n1d + 1 + ip[3];  /* w2 */
   }
   else
      *iw = (iclassM < 2 ? iclassM : 2 + ip[2]);   /* w0, w1, or w2 */
}
int lfunNSsites_M2M8 (FILE* frst, double x[], int np)
{
/* Bayes empirical Bayes (BEB) correction for the posterior of w for each site
under M2 or M8. The integral is 3-d for M2, and 4-d for M2a or M8,
approximated using n1d=10 categories in each dimension. The ngrid=n1d^dim
points make up the grid.
com.ncatG is the number of all possible w's ever needed. They are copied
into com.rK[], to be used to calculate f(x_h|w), stored in com.fhK[], before
entering the grid of 4-d integration. iw[ngrid*nclassM] identifies the
position of w in com.rK[], and pclassM[ngrid*nclassM] is the proportion
of sites under the model. Those are set up in get_pclassM_iw().
The priors are set up in get_grid_para_like(). See notes there.
Some control variables:
M2a=1 for M2a=0 for M2.
ternary=1: use ternary triangles to specify prior for p0-p1 under M2 or M2a
=0: break p0 and p1 into 10 bins and skip the unfeasible points
Parameters and their priors are as follows:
M2 (p0 p1 w2) : p0,p1~U(0,1), w2~U(1,11)
M2a(p0 p1 w0 w2): p0,p1~U(0,1), w0~U(0,1), w2~U(1,11)
M8 (p0 p q ws): p0~U(0,1), p,q~U(0,2), ws~U(1,11)
Ziheng, Copenhagen, 17 May 2004.
*/
int n1d=10, M2a=1, ternary=1, trianglePriorM8=0;
double p0b[]={0,1}, p1b[]={0,1}, w0b[]={0,1}; /* w0b for M2a only. */
double wsb[]={1,11}; /* for w2 in M2 & M2a, or for ws in M8 */
double p_beta_b[]={0,2}, q_beta_b[]={0,2};
int dim=(com.NSsites==8||M2a?4:3), ngrid,igrid, ip[4]={0}, j,k,h, it;
int refsp=0, ncatG0=com.ncatG;
/* # of site classes under model and index for site class */
int nclassM = (com.NSsites==NSpselection?3:n1d+1), iclassM, *iw;
double para[4][100]={{0}}, postpara[4][100]; /* paras on grid for 4-d integral: n1d<=100! */
/* lnfXs is log of term in equation 5 in YWN05, which sums over those terms. */
double fh, fX, *lnfXs,S1,S2, *lnprior, *pclassM, *meanw, *varw, *postSite, *postp0p1=NULL;
double fh1site, t,v;
char timestr[32], *paras[4];
printf("\nBEBing (dim = %d). This may take several minutes.", dim);
/* parameter labels for printing the grid; order matches para[]/postpara[] rows */
if(com.NSsites==8) { paras[0]="p0"; paras[1]="p"; paras[2]="q"; paras[3]="ws"; }
else if(!M2a) { paras[0]="p0"; paras[1]="p1"; paras[2]="w2"; }
else { paras[0]="p0"; paras[1]="p1"; paras[2]="w0"; paras[3]="w2"; }
ngrid=n1d*n1d*n1d*(dim==4?n1d:1);
if(com.NSsites==8) com.ncatG = n1d+n1d; /* w from beta & ws */
else com.ncatG = (M2a ? n1d+1+n1d : 2+n1d); /* w0, w1=1, w2 */
/* one workspace block: meanw[npatt], varw[npatt], postSite[npatt*nclassM] */
if((meanw=(double*)malloc(com.npatt*(2+nclassM)*sizeof(double)))==NULL)
error2("oom meanw");
varw=meanw+com.npatt; postSite=varw+com.npatt;
ternary=(com.NSsites==2 && ternary); /* ternary grid used only under M2/M2a */
if(ternary && (postp0p1=(double*)malloc(n1d*n1d*sizeof(double)))==NULL)
error2("oom postp0p1");
if((lnfXs=(double*)malloc(ngrid*sizeof(double)))==NULL)
error2("oom lnfXs");
/* pclassM (doubles) and iw (ints) share one allocation; iw starts after pclassM */
if((pclassM=(double*)malloc(ngrid*nclassM*(sizeof(double)+sizeof(int))))==NULL)
error2("oom pclassM"); /* this wastes space */
iw = (int*)(pclassM+ngrid*nclassM);
if((lnprior=(double*)malloc(n1d*n1d*sizeof(double)))==NULL)
error2("oom lnprior"); /* this wastes space */
k=com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,k))==NULL) error2("oom fhK");
/* flat prior by default; optionally a triangular prior on p0 under M8 */
for(j=0; j<n1d*n1d; j++) lnprior[j]=0;
if(com.NSsites==8 && trianglePriorM8) {
/* for(j=0; j<n1d; j++) lnprior[j]=(2-1./n1d-j*2./n1d)/n1d; */
for(j=0; j<n1d; j++) lnprior[j]=(2*j+1.)/(n1d*n1d);
printf("triangular prior for p0 under M8\n");
for(j=0; j<n1d; j++) printf("%9.4f", (2*j+1.)/(2*n1d)); FPN(F0);
for(j=0; j<n1d; j++) printf("%9.4f", lnprior[j]); FPN(F0);
}
BayesEB=1; /* NOTE(review): global flag; presumably alters scaling in likelihood routines elsewhere -- confirm */
get_grid_para_like_M2M8(para, n1d, dim, M2a, ternary, p0b, p1b, w0b, wsb, p_beta_b, q_beta_b, x, &S1);
/* Set up im and pclassM, for each igrid and iclassM. */
for(igrid=0; igrid<ngrid; igrid++) {
/* decode grid index igrid into dim digits base n1d, stored in ip[] */
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1) continue; /* infeasible p0+p1>1 */
for(k=0; k<nclassM; k++) {
get_pclassM_iw_M2M8(&iw[igrid*nclassM+k], &pclassM[igrid*nclassM+k],k,ip,para,n1d,M2a,ternary);
}
}
/* calculate log{fX}, where fX is the marginal probability of data,
and posterior of parameters postpara[]. S2 is the scale factor. */
printf("Calculating f(X), the marginal probability of data.\n");
fX=1; S2=-1e300;
FOR(j,dim) FOR(k,n1d) postpara[j][k]=1;
if(ternary) FOR(k,n1d*n1d) postp0p1[k]=1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1)
continue;
for(h=0,lnfXs[igrid]=0; h<com.npatt; h++) {
for(k=0,fh=0; k<nclassM; k++)
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
if(fh<1e-300) {
printf("strange: f[%3d] = %12.6g very small.\n",h,fh);
continue;
}
lnfXs[igrid] += log(fh)*com.fpatt[h];
}
lnfXs[igrid] += (com.NSsites==8 ? lnprior[ip[0]] : lnprior[ip[0]*n1d+ip[1]]);
/* sums of exp(lnfXs) are accumulated relative to the running maximum S2
to avoid floating-point underflow/overflow */
t=lnfXs[igrid]-S2;
if(t>0) { /* change scale factor S2 */
t = (t<200 ? exp(-t) : 0);
fX=fX*t+1;
FOR(j,dim) FOR(k,n1d)
postpara[j][k] *= t;
FOR(j,dim)
postpara[j][ip[j]] ++;
if(ternary) {
FOR(k,n1d*n1d) postp0p1[k] *= t;
postp0p1[ip[0]*n1d+ip[1]] ++;
}
S2 = lnfXs[igrid];
}
else if(t>-200) { /* term too small to matter if t <= -200 */
t = exp(t);
fX += t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] += t;
if(ternary) postp0p1[ip[0]*n1d+ip[1]] += t;
}
}
/* normalize the accumulated posterior weights by the marginal fX */
for(j=0; j<dim; j++)
for(k=0; k<n1d; k++)
postpara[j][k]/=fX;
if(ternary)
for(k=0; k<n1d*n1d; k++)
postp0p1[k] /=fX;
fX = log(fX)+S2;
printf("\tlog(fX) = %12.6f S = %12.6f %12.6f\n", fX+S1-dim*log(n1d*1.),S1,S2);
/* calculate posterior probabilities and mean w for each site pattern.
S1 and S2 are scale factors for probabilities and for w. */
printf("Calculating f(w|X), posterior probabilities of site classes.\n");
for(h=0; h<com.npatt; h++) {
S1=-1e300; FOR(j,nclassM) postSite[j*com.npatt+h]=1;
S2=-1e300; meanw[h]=varw[h]=1;
for(iclassM=0; iclassM<nclassM; iclassM++) {
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1)
continue;
for(k=0,fh=0; k<nclassM; k++) /* duplicated calculation */
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
it = igrid*nclassM+iclassM;
fh1site = pclassM[it]*com.fhK[iw[it]*com.npatt+h];
if(fh1site<1e-300) continue;
fh1site /= fh;
t = log(fh1site)+lnfXs[igrid]; /* t is log of term on grid */
if(t>S1) { /* change scale factor S1 */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] = postSite[j*com.npatt+h]*exp(S1-t);
S1 = t;
}
postSite[iclassM*com.npatt+h] += exp(t-S1);
/* first and second moments of w, accumulated with their own scale S2 */
t = fh1site*com.rK[iw[it]];
v = fh1site*square(com.rK[iw[it]]);
if(t<1e-300) continue;
t = log(t)+lnfXs[igrid]; /* t is log of mean */
v = log(v)+lnfXs[igrid];
if(t>S2) { /* change scale factor S2 */
meanw[h] = meanw[h]*exp(S2-t);
varw[h] = varw[h]*exp(S2-t);
S2 = t;
}
meanw[h] += exp(t-S2);
varw[h] += exp(v-S2);
}
}
/* undo scaling and normalize by the marginal probability fX */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-fX);
meanw[h] *= exp(S2-fX);
varw[h] *= exp(S2-fX);
varw[h] -= meanw[h]*meanw[h];
varw[h] = (varw[h]>0?sqrt(varw[h]):0); /* varw now holds the posterior SD of w */
if((h+1)%10==0 || h==com.npatt-1)
printf("\r\tdid %3d / %3d patterns %s", h+1,com.npatt,printtime(timestr));
} /* for(h) */
/* print out posterior probabilities */
fprintf(frst,"\nBayes Empirical Bayes (BEB) probabilities for %d classes (class)", nclassM);
fprintf(fout,"\nBayes Empirical Bayes (BEB) analysis");
fprintf(fout," (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)");
com.ncatG = ncatG0; /* restore the caller's category count */
PrintProbNSsites(frst, postSite, meanw, varw, nclassM, refsp);
fprintf(fout, "\n\nThe grid %s\n\n", (ternary?"(see ternary graph for p0-p1)":""));
for(j=(ternary?2:0); j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", para[j][k]);
}
if(ternary) for(k=0; k<n1d; k++) postpara[0][k]=postpara[1][k]=-1; /* p0,p1 reported via ternary graph instead */
fprintf(fout, "\n\nPosterior on the grid\n\n");
for(j=(ternary?2:0); j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0;k<n1d;k++)
fprintf(fout, " %6.3f", postpara[j][k]);
}
if(ternary) {
fprintf(fout,"\nPosterior for p0-p1 (see the ternary graph)\n\n");
for(k=0;k<n1d*n1d;k++) {
fprintf(fout," %5.3f", postp0p1[k]);
if(fabs(square((int)sqrt(k+1.))-(k+1))<1e-5) FPN(fout); /* newline after each completed ternary row */
}
fprintf(fout,"\nsum of density on p0-p1 = %10.6f\n", sum(postp0p1,n1d*n1d));
}
BayesEB = 0;
free(meanw); free(lnfXs); free(pclassM); free(lnprior); /* iw freed with pclassM (shared block) */
if(ternary) free(postp0p1);
return(0);
}
/********************************************************************/
void get_grid_para_like_AC(double para[][100], int n1d, int dim,
double w0b[], double w2b[], double x[], double *S);
void get_pclassM_iw_AC(int *iw, double *pclassM, int iclassM, int ip[], double para[][100], int n1d);
void get_grid_para_like_AC(double para[][100], int n1d, int dim,
double w0b[], double w2b[], double x[], double *S)
{
/* This sets up the grid (para[][]) according to the priors.
It calculates the probability of data at each site given w: f(f_h|w).
This is calculated using the branch model (NSsites = 0 model = 2), with
BayesEB=2 used to force the use of the correct scale factors in GetPMatBranch().
Order of site classes for iw or f(x_h|w):
fore back #sets
Branchsite A (121 sets)
site class 0: w0 w0 10
site class 1: w1=1 w1=1 1
site class 2a: w0 w2 100
site class 2b: w1=1 w2 10
Clade C (111 sets)
site class 0: w0 w0 10
site class 1: w1=1 w1=1 1
site class 2: w2 w3 10*10*10...
On return *S holds the sum over patterns of the per-pattern log scale
factors removed from com.fhK[] (see the normalization loop at the end).
*/
int modelA=(com.model==2), i,k,h, iw, site=10; /* site: debug index, unused in this function */
double fh, wbranches[NBTYPE]; /* w for back and fore branches */
int NSsites0=com.NSsites, model0=com.model; /* saved to restore globals at exit */
for(i=0; i<n1d; i++) {
para[0][i] = para[1][i] = -1; /* p0 & p1 */
para[2][i] = w0b[0] + (i+0.5)*(w0b[1]-w0b[0])/n1d; /* w0: bin midpoints on w0b */
para[3][i] = w2b[0] + (i+0.5)*(w2b[1]-w2b[0])/n1d; /* w2: bin midpoints on w2b */
if(com.model==3) /* w3 w4 ... in model C */
for(k=1; k<com.nbtype; k++)
para[3+k][i] = para[3][i];
}
/* calculates the likelihood com.fhK[] */
printf("\nCalculating f(x_h|w) for %d w's\n", com.ncatG);
com.conPSiteClass = 0;
*S = 0;
/* temporarily switch the globals to the branch model (model=2, NSsites=0)
so that ConditionalPNode() computes f(x_h|w) for fixed branch w's */
com.model = 2;
com.NSsites = 0;
com.pomega = wbranches;
for(iw=0; iw<com.ncatG; iw++) {
/* map the flat index iw onto the (fore, back) w pair for this set */
if(modelA) { /* model A: 10 + 1 + 100 + 10 */
if(iw<n1d) wbranches[0] = wbranches[1] = para[2][iw]; /* class 0: w0 */
else if(iw==n1d) wbranches[0] = wbranches[1] = 1; /* class 1: w1 */
else if(iw<n1d+1+n1d*n1d) { /* class 2a: w0 w2 */
wbranches[0] = para[2][(iw-n1d-1)/n1d];
wbranches[1] = para[3][(iw-n1d-1)%n1d];
}
else { /* class 2b: w1 w2 */
wbranches[0] = 1;
wbranches[1] = para[3][iw-n1d-1-n1d*n1d];
}
}
else { /* model C: 10 + 1 + 10*10*... */
if(iw<n1d) /* class 0: w0 */
for(i=0; i<com.nbtype; i++) wbranches[i] = para[2][iw];
else if(iw==n1d) /* class 1: w1 */
for(i=0; i<com.nbtype; i++) wbranches[i] = 1;
else { /* class 2: w2 w3; decode iw-n1d-1 base n1d into one w per branch type */
for(i=com.nbtype-1,k=iw-n1d-1; i>=0; i--) {
wbranches[i] = para[3+i][k%n1d];
k /= n1d;
}
}
/*
printf("\nw%-2d: ", iw+1);
for(i=0; i<com.nbtype; i++) printf(" %10.6f", wbranches[i]);
*/
}
ConditionalPNode(tree.root, 0, x);
for(h=0; h<com.npatt; h++) {
for(i=0,fh=0; i<com.ncode; i++)
fh += com.pi[i]*nodes[tree.root].conP[h*com.ncode+i];
if(fh<=0) {
if(fh<-1e-5) printf("\nfh = %.6f negative\n",fh);
fh=1e-80; /* guard against log(0) below */
}
fh = log(fh);
for(k=0; k<com.NnodeScale; k++)
fh += com.nodeScaleF[k*com.npatt+h]; /* add back node-scaling log factors */
com.fhK[iw*com.npatt+h] = fh;
}
if((iw+1)%10==0 || iw==com.ncatG-1)
printf("\r\t %4d / %d sets.", iw+1, com.ncatG);
}
FPN(F0);
/* rescale: store fhK relative to the per-pattern maximum so exp() cannot
underflow; accumulate the removed log factors (weighted by counts) in *S */
for(h=0,*S=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] = exp(com.fhK[k*com.npatt+h]-fh);
*S += fh*com.fpatt[h];
}
com.NSsites=NSsites0; com.model=model0; /* restore the caller's globals */
}
void get_pclassM_iw_AC(int *iw, double *pclassM, int iclassM, int ip[], double para[][100], int n1d)
{
/* For one grid point ip[] and one site class iclassM, compute
*iw : index of the matching f(x_h|w) column in com.fhK[]
*pclassM : the proportion of sites in that class under the model.
The p0-p1 square on the grid is mapped onto the ternary p0-p1-p2 graph.
Class ordering follows get_grid_para_like_AC().
Model A parameters: (p0 p1 w0 w2); clade model C: (p0 p1 w0 w2 w3 ...).
*/
int isModelA=(com.model==2), ix, iy, b;
double prop[3];
/* map the square grid point (ip[0],ip[1]) to the ternary triangle */
GetIndexTernary(&ix, &iy, &prop[0], &prop[1], ip[0]*n1d+ip[1], n1d);
prop[2] = 1 - prop[0] - prop[1];
/* class proportion: p0, p1, or the remainder p2 for classes >= 2 */
if(iclassM<=2) *pclassM = prop[iclassM];
else *pclassM = prop[2];
if(isModelA && iclassM>=2) /* model A splits class 2 into 2a and 2b */
*pclassM = prop[2]*prop[iclassM-2]/(1-prop[2]);
switch(iclassM) {
case 0: *iw = ip[2]; break; /* class 0: w0 */
case 1: *iw = n1d; break; /* class 1: w1 */
default:
if(!isModelA) { /* clade model C class 2: w2 w3 w4 ... encoded base n1d */
*iw = 0;
for(b=0; b<com.nbtype; b++)
*iw = *iw*n1d + ip[3+b];
*iw += n1d+1;
}
else if(iclassM==2) *iw = n1d+1+ip[2]*n1d+ip[3]; /* class 2a model A: w0 & w2 */
else *iw = n1d+1+n1d*n1d+ip[3]; /* class 2b model A: w1 & w2 */
}
}
int lfunNSsites_AC (FILE* frst, double x[], int np)
{
/* Bayes empirical Bayes (BEB) calculation of posterior probabilities for site
classes under the branch-site model A (Yang & Nielsen 2002) and clade model C
(Bielawski & Yang 2004). The dimension of integral is 4 for A and (3+nbtype)
for C. Each dimension is approximated using n1d=10 categories, and the grid
is made up of ngrid=n1d^dim points.
For branch-site model A, the probability of data at a site f(x_h|w) needs to
be calculated for 121=(d+1+d*d+d) sets of w's. For model C, it needs to be
calculated for 111 (d+1+d^nbtype) sets.
Those are calculated and stored in com.fhK[], before entering the grid.
iw[ngrid*nclassM] identifies the right f(x_h|w), and pclassM[ngrid*nclassM]
is the proportion of sites under the model, f(w|ita). Those are set up in
get_pclassM_iw_AC().
The priors are set up in get_grid_para_like_AC(). See notes there.
Parameters and priors are as follows:
model A (p0 p1 w0 w2): p0,p1~U(0,1), w0~U(0,1), w2~U(1,11)
model C (p0 p1 w0 w2 w3): p0,p1~U(0,1), w0~U(0,1), w2,w3~U(0,5)
Ziheng, UCL, 22 September 2004, modified Nov 2008 to use more than 2 branch types
under clade model C.
*/
int n1d=10, debug=0, site=10;
double w0b[]={0,1}; /* w0b for w0. */
double wsb[]={1,11}; /* for w2 in model A */
double w2b[]={0,3}; /* for w2-w3-w4 in model C */
int modelA=(com.model==2), dim=(modelA?4:3+com.nbtype), ngrid,igrid, ip[3+NBTYPE], j,k,h,hp,it;
int refsp=0, ncatG0=com.ncatG, lst=(com.readpattern?com.npatt:com.ls);
/* # of site classes under model and index for site class */
int nclassM = (modelA?4:3), iclassM, *iw, i;
double para[3+NBTYPE][100]={{0}}, postpara[3+NBTYPE][100]; /* paras on grid : n1d<=100! */
double fh, fX, *lnfXs,S1,S2, *pclassM, *postSite, *postp0p1;
/* fhk must hold one term per site class; nclassM is 4 under model A, so the
array needs 4 slots (fhk[3] would overflow in the accumulation loop below). */
double fhk[4], t, cutoff=0.5;
char timestr[32], paras[3+NBTYPE][5]={"p0","p1","w0","w2","w3"}, *sig, aa;
printf("\nBEBing (dim = %d). This may take many minutes.", dim);
if(!modelA)
for(i=2; i<com.nbtype; i++) sprintf(paras[3+i], "w%d", i+2);
for(i=0,ngrid=1; i<dim; i++) ngrid *= n1d;
if(modelA)
com.ncatG = n1d + 1 + n1d*n1d + n1d; /* branch-site model A: table 1 YWN05 */
else { /* clade model C: table 2 YWN05 */
for(i=0,com.ncatG=1; i<com.nbtype; i++) com.ncatG *= n1d; /* w2 w3 w4 ... */
com.ncatG += n1d + 1; /* w0 & w1=1 */
}
/* one allocation carved into postp0p1, postSite, lnfXs, pclassM, iw; k is in bytes */
k = (n1d*n1d + com.npatt*nclassM + ngrid + ngrid*nclassM)*sizeof(double)
+ ngrid*nclassM*sizeof(int);
if(noisy) printf("\nTrying to get %dM memory in lfunNSsites_A\n", k);
if((postp0p1=(double*)malloc(k)) == NULL)
error2("oom in lfunNSsites_AC");
postSite = postp0p1 + n1d*n1d;
lnfXs = postSite + com.npatt*nclassM;
pclassM = lnfXs + ngrid;
iw = (int*)(pclassM + ngrid*nclassM);
k = com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,k)) == NULL) error2("oom fhK");
BayesEB = 2; /* forces the correct scale factors in GetPMatBranch() (see get_grid_para_like_AC) */
get_grid_para_like_AC(para, n1d, dim, w0b, (modelA?wsb:w2b), x, &S1);
/* Set up im and pclassM, for each igrid and iclassM. */
for(igrid=0; igrid<ngrid; igrid++) {
/* decode grid index igrid into dim digits base n1d, stored in ip[] */
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
for(k=0; k<nclassM; k++) {
get_pclassM_iw_AC(&iw[igrid*nclassM+k], &pclassM[igrid*nclassM+k],k,ip,para,n1d);
}
}
/* calculate marginal prob of data, fX, and postpara[]. S2 is scale. */
printf("Calculating f(X), the marginal probability of data.\n");
fX=1; S2=-1e300;
for(j=0; j<dim; j++) /* postpara[0-1] for p0p1 ignored */
for(k=0; k<n1d; k++)
postpara[j][k] = 1;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] = 1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) {
ip[j]=it%n1d;
it/=n1d;
}
for(h=0,lnfXs[igrid]=0; h<com.npatt; h++) {
for(k=0,fh=0; k<nclassM; k++)
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
if(fh<1e-300) {
printf("strange: f[%3d] = %12.6g very small.\n",h,fh);
continue;
}
lnfXs[igrid] += log(fh)*com.fpatt[h];
}
/* accumulate exp(lnfXs) relative to the running maximum S2 to avoid underflow */
t = lnfXs[igrid]-S2;
if(t>0) { /* change scale factor S2 */
t = (t<200 ? exp(-t) : 0);
fX = fX*t+1;
for(j=0; j<dim; j++) for(k=0; k<n1d; k++)
postpara[j][k] *= t;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] *= t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] ++;
postp0p1[ip[0]*n1d+ip[1]] ++;
S2 = lnfXs[igrid];
}
else if(t>-200) { /* terms with t <= -200 are negligibly small */
t = exp(t);
fX += t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] += t;
postp0p1[ip[0]*n1d+ip[1]] += t;
}
if((igrid+1)%500==0 || igrid==ngrid-1)
printf("\t%3d / %3d grid points\r", igrid+1,ngrid);
}
/* normalize posterior weights by the marginal fX */
for(j=0; j<dim; j++) for(k=0; k<n1d; k++)
postpara[j][k] /= fX;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] /=fX;
fX = log(fX)+S2;
printf("\tlog(fX) = %12.6f S = %12.6f %12.6f\n", fX+S1-dim*log(n1d*1.),S1,S2);
/* calculate posterior probabilities for sites. S1 is scale factor */
printf("Calculating f(w|X), posterior probs of site classes.\n");
for(h=0; h<com.npatt; h++) {
S1 = -1e300;
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] = 1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
for(k=0,fh=0; k<nclassM; k++) /* duplicated calculation */
fh += fhk[k] = pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
for(iclassM=0; iclassM<nclassM; iclassM++) {
fhk[iclassM] /= fh;
t = log(fhk[iclassM]) + lnfXs[igrid]; /* t is log of term on grid */
if(t>S1 + 50) { /* change scale factor S1 */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-t);
S1 = t;
}
postSite[iclassM*com.npatt+h] += exp(t-S1);
}
}
/* undo scaling and normalize by the marginal probability fX */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-fX);
if((h+1)%10==0 || h==com.npatt-1)
printf("\r\tdid %3d / %3d site patterns %s", h+1,com.npatt,printtime(timestr));
} /* for(h) */
if(debug)
for(k=0,printf("\nS%d: ",site); k<nclassM; k++) printf("%7.4f",postSite[k*com.npatt+site]);
/* print out posterior probabilities */
fprintf(frst,"\nBayes Empirical Bayes (BEB) probabilities for %d classes (class)", nclassM);
fprintf(fout,"\nBayes Empirical Bayes (BEB) analysis");
fprintf(fout," (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)");
com.ncatG = ncatG0; /* restore the caller's category count */
PrintProbNSsites(frst, postSite, NULL, NULL, nclassM, refsp);
if(com.model==2) { /* branch&site model A */
fprintf(fout,"\nPositive sites for foreground lineages Prob(w>1):\n");
for(h=0; h<lst; h++) {
hp = (!com.readpattern ? com.pose[h] : h);
aa = GetAASiteSpecies(refsp, hp);
/* P(w>1) = P(class 2a) + P(class 2b) at this site */
t = postSite[2*com.npatt+hp] + postSite[3*com.npatt+hp];
if(t>cutoff) {
sig=""; if(t>.95) sig="*"; if(t>.99) sig="**";
fprintf(fout,"%6d %c %.3f%s\n",h+1, aa, t, sig);
}
}
}
fprintf(fout, "\n\nThe grid (see ternary graph for p0-p1)\n\n");
for(j=2; j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", para[j][k]);
}
for(k=0; k<n1d; k++)
postpara[0][k] = postpara[1][k]=-1; /* p0,p1 reported via the ternary graph instead */
fprintf(fout, "\n\nPosterior on the grid\n\n");
for(j=2; j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", postpara[j][k]);
}
fprintf(fout,"\nPosterior for p0-p1 (see the ternary graph)\n\n");
for(k=0; k<n1d*n1d; k++) {
fprintf(fout," %5.3f", postp0p1[k]);
if(fabs(square((int)sqrt(k+1.))-(k+1))<1e-5) FPN(fout); /* newline after each completed ternary row */
}
fprintf(fout,"\nsum of density on p0-p1 = %10.6f\n", sum(postp0p1,n1d*n1d));
free(postp0p1); /* frees postSite/lnfXs/pclassM/iw too (single block) */
BayesEB = 0;
return(0);
}
| aac67e57114c2982253881c61092800d4dd5a2ba.cu | /* codeml.c (aaml.c & codonml.c)
Maximum likelihood parameter estimation for codon sequences (seqtype=1)
or amino-acid sequences (seqtype=2)
Copyright, Ziheng YANG, 1993-2003
cc -o codeml -fast codeml.c tools.o -lm
codeml <ControlFileName>
*/
/*
#define NSSITESBandits
#define DSDN_MC 1
#define DSDN_MC_SITES 1
*/
#include "paml.h"
#define NS 7000
#define NBRANCH (NS*2-2)
#define NNODE (NS*2-1)
#define MAXNSONS 100
#define NGENE 2000
#define LSPNAME 50
#define NCODE 64
#define NCATG 513
#define NBTYPE 17
#define NP (NBRANCH*2+NGENE-1+2+NCODE+2)
/*
#define NP (NBRANCH+NGENE-1+189+2+NCODE+2)
*/
extern char BASEs[],AAs[];
extern int noisy, NFunCall, NEigenQ, NPMatUVRoot, *ancestor, GeneticCode[][64];
extern double *SeqDistance;
int Forestry (FILE *fout);
int GetMemPUVR(int nc, int nUVR);
int sortwM3(double x[]);
void DetailOutput(FILE *fout, double x[], double var[]);
int GetOptions (char *ctlf);
int testx (double x[], int np);
int SetxBound (int np, double xb[][2]);
int SetxInitials (int np, double x[], double xb[][2]);
int GetInitials (double x[], int*fromfile);
double *PointKappa (double xcom[], int igene);
double *PointOmega (double xcom[], int igene, int inode, int isiteclass);
int GetCodonFreqs (void);
int SetParameters (double x[]);
int SetParametersNSsites (double x[]);
int Set_UVR_BranchSite (int iclass, int branchlabel);
int SetPGene (int igene, int _pi, int _UVRoot, int _alpha, double x[]);
int SetPSiteClass(int iclass, double x[]);
int PMatJC69like (double P[], double t, int n);
int printfcode (FILE *fout, double fb61[], double space[]);
int InitializeCodon (FILE *fout, double space[]);
int AA2Codonf (double faa[20], double fcodon[]);
int DistanceMatAA (FILE *fout);
int GetDaa(FILE *fout, double daa[]);
void getpcodonClass(double x[], double pcodonClass[]);
int SelectionCoefficients (FILE* fout, double kappa[], double ppi[], double omega);
int EigenQcodon(int mode, double blength, double *S, double *dS, double *dN,
double Root[], double U[], double V[], double *meanrate, double kappa[], double omega, double Q[]);
int EigenQaa(FILE *fout, double Root[], double U[], double V[],double rate[]);
int Qcodon2aa(double Qc[], double pic[], double Qaa[], double piaa[]);
int SetAA1STEP(void);
int GetOmegaAA(int OmegaAA[]);
int TestModelQc(FILE *fout, double x[]);
double lfun2dSdN(double x[], int np);
int VariancedSdN(double t, double omega, double vtw[2*2], double vdSdN[2*2]);
int GetCodonFreqs2 (void);
int PairwiseCodon(FILE *fout, FILE*fds, FILE*fdn, FILE*dt, double space[]);
int PairwiseAA(FILE *fout, FILE *f2AA);
int lfunNSsites_rate(FILE* fout, double x[], int np);
int lfunNSsites_M2M8(FILE* frst, double x[], int np);
int lfunNSsites_AC(FILE* frst, double x[], int np);
double GetBranchRate(int igene, int ibrate, double x[], int *ix);
int GetPMatBranch(double Pt[], double x[], double t, int inode);
int ConditionalPNode(int inode, int igene, double x[]);
double CDFdN_dS(double x,double par[]);
int DiscreteNSsites(double par[]);
char GetAASiteSpecies(int species, int sitepatt);
void finishup(void);
int mergeSeqs(FILE*fout);
void Get4foldSites(void);
int AdHocRateSmoothing(FILE*fout, double x[NS*3], double xb[NS*3][2], double space[]);
void DatingHeteroData(FILE* fout);
int SlidingWindow(FILE*fout, FILE* fpair[], double space[]);
void SimulateData2s61(void);
void Ina(void);
void d4dSdN(FILE*fout);
/* Global bag of run-time settings and per-dataset working storage shared by
   most routines in this file. A single instance `com` is defined below. */
struct common_info {
char *z[NS], *spname[NS], seqf[128],outf[128],treef[128],daafile[128], cleandata; /* sequences, species names, file names */
char oldconP[NNODE]; /* update conP for nodes? to save computation */
int seqtype, ns, ls, ngene, posG[NGENE+1], lgene[NGENE], npatt,*pose, readpattern; /* data dimensions and site-pattern bookkeeping */
int runmode,clock, verbose,print, codonf,aaDist,model,NSsites; /* main model/analysis switches from the control file */
int nOmega, nbtype, nOmegaType; /* branch partition, AA pair (w) partition */
int method, icode, ncode, Mgene, ndata, bootstrap;
int fix_rgene,fix_kappa,fix_omega,fix_alpha,fix_rho,nparK,fix_blength,getSE; /* which parameters are fixed vs estimated */
int np, ntime, nrgene, nkappa, npi, nrate, nalpha, ncatG, hkyREV; /* counts of parameters of each kind */
size_t sconP, sspace; /* sizes (bytes) of the conP and space allocations */
double *fpatt, *space, kappa,omega,alpha,rho,rgene[NGENE], TipDate, TipDate_TimeUnit;
double pi[NCODE], piG[NGENE][64], fb61[64]; /* equilibrium frequencies (overall, per gene, sense codons) */
double f3x4[NGENE][12], *pf3x4, piAA[20];
double freqK[NCATG], rK[NCATG], MK[NCATG*NCATG],daa[20*20], *conP, *fhK; /* site-class freqs/rates and likelihood workspaces */
double (*plfun)(double x[],int np); /* pointer to the likelihood function in use */
double omega_fix; /* fix the last w in the NSbranchB, NSbranch2 models
for lineages. Useful for testing whether w>1 for some lineages. */
int conPSiteClass; /* conPSiteClass=0 if (method==0) and =1 if (method==1)?? */
int NnodeScale;
char *nodeScale; /* nScale[ns-1] for interior nodes */
double *nodeScaleF; /* nScaleF[npatt] for scale factors */
/* pomega & pkappa are used to communicate between SetParameters & ConditionalPNode
& EigenQcodon. Try to remove them? */
double *pomega, pkappa[5], *ppi;
} com;
/* Tree topology in branch form: branches[i] holds (father, son) node indices. */
struct TREEB {
int nbranch, nnode, root, branches[NBRANCH][2];
double lnL; /* log likelihood of this tree */
} tree;
/* Per-node information; nodes[] is the working tree, nodes_t[] a scratch copy. */
struct TREEN {
int father, nson, sons[MAXNSONS], ibranch, ipop;
double branch, age, omega, *conP, label; /* branch length, node age, dN/dS, conditional probs */
char *nodeStr, fossil, usefossil;
} *nodes, **gnodes, nodes_t[2*NS-1];
/* for sptree.nodes[].fossil: lower, upper, bounds, gamma, inverse-gamma */
enum {LOWER_F=1, UPPER_F, BOUND_F} FOSSIL_FLAGS;
char *fossils[]={" ", "L", "U", "B"}; /* printable labels matching FOSSIL_FLAGS (index 0 = no fossil) */
/* Species tree with fossil-calibration information, used by the dating code. */
struct SPECIESTREE {
int nbranch, nnode, root, nspecies, nfossil;
struct TREESPN {
char name[LSPNAME+1], fossil, usefossil; /* fossil: 0, 1, 2, 3 */
int father, nson, sons[2];
double age, pfossil[7]; /* lower and upper bounds or alpha & beta */
double *lnrates; /* log rates for loci */
} nodes[2*NS-1];
} sptree;
/* all trees are binary & rooted, with ancestors unknown. */
struct DATA { /* locus-specific data and tree information */
int ns[NGENE], ls[NGENE], npatt[NGENE], ngene, lgene[NGENE]; /* per-locus sequence counts, lengths, pattern counts */
int root[NGENE+1], BlengthMethod, fix_nu, nbrate[NGENE], icode[NGENE];
char *z[NGENE][NS], cleandata[NGENE]; /* per-locus sequences and cleandata flags */
char idaafile[NGENE], daafile[NGENE][40]; /* per-locus amino-acid rate matrix files */
double *fpatt[NGENE], lnpT, lnpR, lnpDi[NGENE]; /* pattern counts and log prior/likelihood terms */
double Qfactor[NGENE], pi[NGENE][NCODE];
double rgene[NGENE], kappa[NGENE], alpha[NGENE], omega[NGENE]; /* per-locus rate/model parameters */
int NnodeScale[NGENE];
char *nodeScale[NGENE]; /* nScale[data.ns[locus]-1] for interior nodes */
} data;
extern double Small_Diff;
/* codon bookkeeping: number of sense codons and maps between the 61-state
   and 64-state codon indexings; FourFold marks 4-fold degenerate sites */
int Nsensecodon, FROM61[64], FROM64[64], FourFold[4][4];
int ChangedInIteration; /* 1: t changed, update P(t); 2: paras changed, update UVRoot */
/* eigen-decomposition workspaces for the rate matrix Q = U * diag(Root) * V */
double *PMat, *U, *V, *Root, *_UU[NBTYPE+2], *_VV[NBTYPE+2], *_Root[NBTYPE+2];
/* 5 sets for branchsite models (YN2002); 6 sets for clade models */
double pcodon0[64],paa0[20], *pcodonClass; /* for aaDist=FIT1 or FIT2 */
int BayesEB; /* =1 for site models M2a & M8; =2 for branch-site models A & C */
int LASTROUND;
int IClass=-1;
int OmegaAA[190], AA1STEP[190]; /* amino-acid pair w classes and one-step-change flags */
double _rateSite=1;
double Qfactor_NS, Qfactor_NS_branch[NBTYPE]; /* rate-matrix scale factors under NSsites/branch models */
int KGaussLegendreRule=16; /* number of Gauss-Legendre points for the T-distribution site models */
double AAchem[][20+1]={ /* last element is the max */
{8.1, 10.5, 11.6, 13, 5.5, 10.5, 12.3, 9, 10.4, 5.2,
4.9, 11.3, 5.7, 5.2, 8, 9.2, 8.6, 5.4, 6.2, 5.9, 13}, /* p */
{ 31, 124, 56, 54, 55, 85, 83, 3, 96, 111,
111, 119, 105, 132, 32.5, 32, 61, 170, 136, 84, 170}, /* v */
{0, 0.65, 1.33, 1.38, 2.75, 0.89, 0.92, 0.74, 0.58,
0, 0, 0.33, 0, 0, 0.39, 1.42, 0.71, 0.13, 0.2, 0, -999},/* c */
{-0.11, 0.079, -0.136, -0.285, -0.184, -0.067, -0.246, -0.073, 0.32, 0.001,
-0.008, 0.049, -0.041, 0.438, -0.016, -0.153, -0.208, 0.493, 0.381, -0.155} /* a */
}; /* in the order p, v, c, a */
/* shared output streams: main output, iteration log, site log-likelihoods, result files */
FILE *fout, *frub, *flnf, *frst, *frst1, *frst2=NULL, *finitials;
char *ratef="rates";
/* codon-frequency model codes and their printable names (indices must match) */
enum {Fequal, F1x4, F3x4, Fcodon, F1x4MG, F3x4MG, FMutSel0, FMutSel} CodonFreqs;
char *codonfreqs[]={"Fequal", "F1x4", "F3x4", "Fcodon", "F1x4MG", "F3x4MG", "FMutSel0", "FMutSel"};
enum {NSbranchB=1, NSbranch2, NSbranch3} NSBranchModels;
char *NSbranchmodels[]={"One dN/dS ratio",
"free dN/dS Ratios for branches", "several dN/dS ratios for branches",
"NSbranch3"};
enum {Poisson, EqualInput, Empirical, Empirical_F,
FromCodon=6, REVaa_0=8, REVaa=9} AAModel;
char *aamodels[]={"Poisson", "EqualInput", "Empirical", "Empirical_F", "",
"", "FromCodon", "", "REVaa_0", "REVaa"};
enum {NSnneutral=1, NSpselection, NSdiscrete, NSfreqs, NSgamma, NS2gamma,
NSbeta, NSbetaw, NSbetagamma, NSbeta1gamma, NSbeta1normal, NS02normal,
NS3normal, NSM2aRel=22, NSTgamma, NSTinvgamma, NSTgamma1, NSTinvgamma1} NSsitesModels;
char *NSsitesmodels[]={"one-ratio","NearlyNeutral", "PositiveSelection","discrete","freqs",
"gamma","2gamma","beta","beta&w>1","beta&gamma", "beta&gamma+1",
"beta&normal>1", "0&2normal>0", "3normal>0", "", "", "", "", "", "", "", "",
"M2a_rel", "Tgamma", "Tinvgamma", "Tgamma+1", "Tinvgamma+1"};
int maxNSsitesModels=27;
enum {FIT1=11, FIT2=12} SiteClassModels;
enum {AAClasses=7 } aaDistModels;
char *clockstr[]={"", "Global clock", "Local clock", "ClockCombined"};
enum {GlobalClock=1, LocalClock, ClockCombined} ClockModels;
#define CODEML 1
#include "treesub.c"
#include "treespace.c"
/* variables for batch run of site models */
int ncatG0=10, insmodel=0, nnsmodels=1, nsmodels[15]={0};
/* used for sliding windows analysis */
int windowsize0=20, offset0=1, npositive=0;
double lnLmodel;
int main (int argc, char *argv[])
{
/* codeml driver.
   Reads the control file (argv[1] or "codeml.ctl"), opens the sequence and
   result files, sets model defaults in the global com structure, and then
   runs the requested analysis for each of com.ndata data sets: ML analysis
   over trees (Forestry), pairwise estimation (runmode=-2), stepwise
   addition / perturbation / star decomposition tree searches, or a batch
   of NSsites models when the control file lists several (nnsmodels>1).
   Returns 0 on success; exits with -1 if the sequence file is missing.
*/
FILE *fseq=NULL, *fpair[6];
char pairfs[6][32]={"2NG.dS","2NG.dN","2NG.t", "2ML.dS","2ML.dN","2ML.t"};
char ctlf[96]="codeml.ctl", *pmodel, timestr[64];
char *seqtypestr[3]={"CODONML", "AAML", "CODON2AAML"};
char *Mgenestr[]={"diff. rate", "separate data", "diff. rate & pi",
"diff. rate & k&w", "diff. rate & pi & k&w"};
int getdistance=1, i, k, s2=0, idata, nc, nUVR, cleandata0;
#ifdef NSSITESBandits
atexit(finishup);
#endif
starttimer();
/*
printf("KGaussLegendreRule? ");
scanf("%d", &KGaussLegendreRule);
*/
/* default option values; GetOptions() below overrides them from the .ctl file */
com.ndata=1;
noisy=0; com.runmode=0;
com.clock=0; com.fix_rgene=0; /* 0: estimate rate factors for genes */
com.cleandata=0; /* 1: delete; 0:use missing data */
com.seqtype=AAseq;
com.model=Empirical_F;
strcpy(com.daafile, "jones.dat");
com.icode=0; com.nrate=0;
com.fix_kappa=0; com.kappa=1; com.omega=2.1;
com.fix_alpha=1; com.alpha=0.; com.ncatG=4; /* alpha=0 := inf */
com.fix_rho=1; com.rho=0.;
com.getSE=0; com.print=0; com.verbose=1; com.fix_blength=0;
com.method=0; com.space=NULL;
/* scratch/result files shared by many routines */
frub=gfopen("rub","w");
frst=gfopen("rst","w");
frst1=gfopen("rst1","w");
/*
mergeSeqs(frst); exit(0);
Ina();
*/
SetSeed(-1, 0);
#if (DSDN_MC || DSDN_MC_SITES)
SimulateData2s61();
#endif
if(argc>1) strncpy(ctlf, argv[1], 95);
GetOptions(ctlf);
/* remember cleandata as read from the .ctl file; GetOptions() is re-read
   per data set below and may clobber it */
cleandata0 = com.cleandata;
if(com.runmode!=-2) finitials=fopen("in.codeml","r");
else getdistance = 1;
fprintf(frst, "Supplemental results for CODEML (seqf: %s treef: %s)\n",
com.seqf, com.treef);
if(com.getSE==2) frst2=fopen("rst2","w");
printf("%s in %s\n", seqtypestr[com.seqtype-1], pamlVerStr);
fout = gfopen(com.outf, "w");
if(noisy && com.seqtype==CODONseq)
{ printcu(F0,NULL,com.icode); puts("Nice code, uuh?"); }
/* space for P&U&V&Root */
if(com.clock==5 || com.clock==6)
DatingHeteroData(fout);
nUVR=1; nc=20;
if(com.seqtype==CODONseq) {
nc = 64;
if(com.model>=1) nUVR = NBTYPE+2;
}
/* NOTE(review): the first condition below is unreachable (seqtype==CODONseq
   was handled above), so this branch effectively tests com.model==FromCodon. */
else if (com.seqtype==CODONseq || com.model==FromCodon)
nc = 64;
GetMemPUVR(nc, nUVR);
if((fseq=fopen(com.seqf,"r"))==NULL || com.seqf[0]=='\0') {
printf ("\n\nSequence file %s not found!\n", com.seqf);
exit (-1);
}
/* d4dSdN(fout); */
if (com.aaDist==AAClasses) {
SetAA1STEP();
GetOmegaAA(OmegaAA);
}
else if (com.seqtype==AAseq && com.model==REVaa_0)
SetAA1STEP();
/* open pairwise-output files: NG86 triple for codon data, plus the ML triple
   (or a single 2AA.t for amino acids) when runmode = -2 */
if(com.seqtype==1) {
for(i=0; i<3; i++)
fpair[i]=(FILE*)gfopen(pairfs[i],"w");
if(com.runmode==-2)
for(; i<6;i++) fpair[i]=(FILE*)gfopen(pairfs[i],"w");
}
else if(com.runmode==-2)
fpair[0]=(FILE*)gfopen("2AA.t","w");
/* ---- main loop over data sets ---- */
for (idata=0; idata<com.ndata; idata++) {
if (com.ndata>1) {
printf ("\nData set %d ", idata+1);
fprintf(fout, "\n\nData set %d\n", idata+1);
fprintf(frst,"\t%d",idata+1);
fprintf(frst1, "%d", idata+1);
fprintf(frub,"\nData set %2d\n",idata+1);
}
if(idata)
GetOptions(ctlf); /* warning: ndata, cleandata etc. are read again. */
/* batch run of several NSsites models: sanity-check options and size
   com.ncatG large enough for any model in the list so fhK[] fits */
if(nnsmodels>1) {
if(com.seqtype!=1) error2("batch run of site models requires codon seqs.");
if(com.fix_omega) error2("fix omega during batch run?");
if(com.model) error2("model should be 0 in the batch run?");
if(com.runmode) error2("runmode?");
/* for allocating memory com.fhK[] */
com.NSsites=NSbetaw; com.ncatG=ncatG0+1;
for(i=0; i<nnsmodels; i++)
if(nsmodels[i]>=NSTgamma || nsmodels[i]<=NSTinvgamma1)
com.ncatG = max2(com.ncatG, KGaussLegendreRule+1);
printf("NSsites batch run (ncatG as in YNGP2000): ");
for(i=0; i<nnsmodels; i++)
printf(" %2d", nsmodels[i]);
FPN(F0);
}
com.cleandata = cleandata0;
/* ReadSeq may change seqtype*/
ReadSeq((com.verbose?fout:NULL), fseq, com.cleandata);
SetMapAmbiguity();
/* AllPatterns(fout); */
fprintf(frst1,"\t%d\t%d\t%d", com.ns, com.ls, com.npatt);
if (com.ngene==1)
com.Mgene = 0;
/* several model combinations are not implemented for multiple genes */
if(com.ngene>1) {
if(com.seqtype==1 && com.npi)
error2("codon models (estFreq) not implemented for ngene > 1");
if(com.runmode==-2 && com.Mgene!=1) error2("use Mgene=1 for runmode=-2?");
if(com.model) error2("NSbranchsites with ngene.");
if(com.NSsites) error2("NSsites with ngene.");
if(com.aaDist>=FIT1) /* because of pcodon0[] */
{ error2("ngene for amino acid fitness models"); }
}
if(com.ndata==1) fclose(fseq);
/* allocate the tree-node array for this data set (freed at loop end) */
i = (com.ns*2-1)*sizeof(struct TREEN);
if((nodes=(struct TREEN*)malloc(i))==NULL)
error2("oom nodes");
/* ---- print the model header to the main output file ---- */
pmodel=(com.seqtype==CODONseq?NSbranchmodels[com.model]:aamodels[com.model]);
fprintf(fout,"%s (in %s) %s\n",seqtypestr[com.seqtype-1], pamlVerStr, com.seqf);
fprintf(fout,"Model: %s for branches, ", pmodel);
if(com.clock) fprintf(fout," %s ",clockstr[com.clock]);
if(com.seqtype==CODONseq||com.model==FromCodon) {
if(com.fix_kappa) fprintf(fout, " kappa = %.3f fixed\n", com.kappa);
if(com.fix_omega) fprintf(fout, " omega = %.3f fixed\n", com.omega);
}
if(com.seqtype==AAseq && (com.model==Empirical||com.model==Empirical_F))
fprintf (fout, " (%s) ", com.daafile);
if(com.seqtype==AAseq&&com.nrate) fprintf(fout,"(nrate:%d) ", com.nrate);
if(com.alpha && com.rho) fprintf (fout, "Auto-");
if(com.alpha) fprintf (fout, "dGamma (ncatG=%d) ", com.ncatG);
if(com.ngene>1)
fprintf (fout, " (%d genes: %s) ", com.ngene, Mgenestr[com.Mgene]);
/* work out how many alpha parameters are estimated (0, 1, or per gene) */
if(com.alpha==0) com.nalpha=0;
else com.nalpha=(com.nalpha?com.ngene:!com.fix_alpha);
if(com.Mgene==1) com.nalpha=!com.fix_alpha;
if(com.nalpha>1 && (!com.alpha || com.ngene==1 || com.fix_alpha))
error2("Malpha");
if(com.nalpha>1 && com.rho) error2("Malpha or rho");
if(com.nalpha>1) fprintf (fout,"(%d gamma)", com.nalpha);
if(com.Mgene && com.ngene==1) error2("Mgene for one gene.");
if(com.seqtype==CODONseq) {
fprintf (fout, "\nCodon frequency model: %s\n", codonfreqs[com.codonf]);
if(com.alpha)
fputs("Warning: Gamma model for codons. See documentation.",fout);
}
if((com.seqtype==CODONseq||com.model==FromCodon)
&& (com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses))
fprintf(fout,"%s, %s\n",com.daafile,(com.aaDist>0?"geometric":"linear"));
if(com.NSsites) {
fprintf(fout,"Site-class models: ");
if (nnsmodels==1) {
fprintf(fout," %s",NSsitesmodels[com.NSsites]);
if(com.NSsites>=NSdiscrete)fprintf(fout," (%d categories)",com.ncatG);
}
if(com.nparK) fprintf(fout," & HMM");
FPN(fout);
if(com.aaDist)
fprintf(fout,"\nFitness models: aaDist: %d\n",com.aaDist);
}
fprintf(fout,"ns = %3d ls = %3d\n\n", com.ns, com.ls);
/* ---- working-space allocation (grown as needed later) ---- */
com.sspace = max2(5000000,3*com.ncode*com.ncode*sizeof(double));
if(com.NSsites) {
if(com.sspace < 2*com.ncode*com.ncode+4*com.npatt*sizeof(double))
com.sspace = 2*com.ncode*com.ncode+4*com.npatt*sizeof(double);
}
k = com.ns*(com.ns-1)/2;
/*
com.sspace=max2(com.sspace,
(int)sizeof(double)*((com.ns*2-2)*(com.ns*2-2+4+k)+k));
*/
if((com.space = (double*)realloc(com.space,com.sspace))==NULL) {
printf("\nfailed to get %9lu bytes for space", com.sspace);
error2("oom space");
}
if(getdistance) {
SeqDistance=(double*)realloc(SeqDistance, k*sizeof(double));
ancestor=(int*)realloc(ancestor, k*sizeof(int));
if(SeqDistance==NULL||ancestor==NULL) error2("oom distance&ancestor");
for(i=0; i<k; i++) SeqDistance[i] = -1;
}
/* initialize base/codon frequencies from the data */
if(com.seqtype==AAseq) {
InitializeBaseAA (fout);
if (com.model==FromCodon /* ||com.aaDist==AAClasses */)
AA2Codonf(com.pi, com.fb61); /* get codon freqs from aa freqs */
}
else { /* codon sequences */
if(com.sspace < max2(com.ngene+1,com.ns)*(64+12+4)*sizeof(double)) {
com.sspace = max2(com.ngene+1,com.ns)*(64+12+4)*sizeof(double);
if((com.space = (double*)realloc(com.space,com.sspace))==NULL)
error2("oom space for #c");
}
if (InitializeCodon(fout,com.space))
error2("giving up on stop codons");
if(com.Mgene==3)
for(i=0; i<com.ngene; i++)
xtoy(com.pi,com.piG[i],com.ncode);
}
/* pairwise distances (NG86 for codons, Poisson-corrected for aa) */
if(getdistance) {
if(com.seqtype==CODONseq)
DistanceMatNG86(fout,fpair[0],fpair[1],fpair[2],0);
else
DistanceMatAA(fout);
}
fflush(fout);
if(com.seqtype==AAseq && com.model==Poisson && !com.print)
PatternWeightJC69like(fout);
if(com.alpha || com.NSsites) {
s2=com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,s2))==NULL) error2("oom fhK");
}
/* ---- dispatch the analysis ---- */
if(com.runmode==-2 && com.Mgene!=1) {
/* pairwise ML estimation only; no tree needed */
if(com.seqtype==CODONseq)
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
else
PairwiseAA(fout, fpair[0]);
}
else {
com.sconP = 2L *com.ncode*com.npatt*sizeof(double);
/* to be increased later in GetInitials() */
/* com.sconP = (com.ns-1)*com.ncode*com.npatt*sizeof(double); */
com.conP = (double*)realloc(com.conP, com.sconP);
/* NOTE(review): "%9u" with size_t/long operands below is a printf
   format mismatch on LP64 platforms — harmless for the analysis but
   the printed byte counts may be wrong; consider "%zu". */
printf("\n%9u bytes for distance",com.ns*(com.ns-1)/2*sizeof(double));
printf("\n%9u bytes for conP\n", com.sconP);
printf ("%9u bytes for fhK\n%9u bytes for space\n", s2, com.sspace);
if(com.conP==NULL)
error2("oom conP");
if (nnsmodels>1) {
/* batch run: fit each listed NSsites model in turn on the same data,
   resetting ncatG and the rate-parameter count per model */
for(insmodel=0; insmodel<nnsmodels; insmodel++) {
com.NSsites = nsmodels[insmodel];
if(com.NSsites<=NSpselection)
com.ncatG = com.NSsites+1;
else if(com.NSsites==NSM2aRel || com.NSsites==NSdiscrete)
com.ncatG = 3;
else if (com.NSsites==NSfreqs)
com.ncatG=5;
else if (com.NSsites==NSbetaw||com.NSsites==NS02normal)
com.ncatG = ncatG0 + 1;
else
com.ncatG = ncatG0;
if(com.NSsites==NSTgamma || com.NSsites==NSTinvgamma)
com.ncatG=KGaussLegendreRule;
if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1)
com.ncatG=KGaussLegendreRule+1;
com.nrate = com.nkappa=(com.hkyREV?5:!com.fix_kappa);
if(com.NSsites==0 || com.NSsites==NSbetaw) com.nrate += !com.fix_omega;
else if(com.NSsites==NSnneutral) com.nrate ++;
else if(com.NSsites==NSpselection || com.NSsites==NSM2aRel)
com.nrate += 1+!com.fix_omega;
else if(com.NSsites==NSdiscrete)
com.nrate += com.ncatG;
printf("\n\nModel %d: %s\n",com.NSsites, NSsitesmodels[com.NSsites]);
fprintf(fout,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
fprintf(frst,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
fprintf(frub,"\n\nModel %d: %s",com.NSsites,NSsitesmodels[com.NSsites]);
if(com.NSsites) fprintf(fout," (%d categories)",com.ncatG);
FPN(fout);
#ifdef NSSITESBandits
com.fix_blength = (com.NSsites>0 ? 2 : 1);
if(com.NSsites>0) strcpy(com.treef,"M0tree");
#endif
Forestry(fout);
printf("\nTime used: %s\n", printtime(timestr));
fprintf(fout,"\nTime used: %s\n", printtime(timestr));
}
}
else {
/* single-model run: choose tree search/evaluation mode by runmode */
if (com.Mgene==1) MultipleGenes(fout, fpair, com.space);
else if (com.runmode==0) Forestry(fout);
else if (com.runmode==3) StepwiseAddition(fout, com.space);
else if (com.runmode>=4) Perturbation(fout,(com.runmode==4),com.space);
else StarDecomposition(fout, com.space);
printf("\nTime used: %s\n", printtime(timestr));
fprintf(fout,"\nTime used: %s\n", printtime(timestr));
}
}
FPN(frst); fflush(frst);
FPN(frst1); fflush(frst1);
free(nodes);
} /* for (idata) */
/**************/
/*
printf("\nfalse positive: %6d\n", npositive);
fprintf(frst1, " false positive: %6d\n", npositive);
*/
/* ---- cleanup: close files and release global buffers ---- */
fclose(frst);
k=0;
if(com.seqtype==1) k=(com.runmode==-2?6:3);
else if (com.runmode==-2) k=1;
FOR(i,k) fclose(fpair[i]);
if(com.ndata>1 && fseq) fclose(fseq);
fclose(fout); fclose(frub);
if(finitials) fclose(finitials);
FreeMemPUVR();
free(com.pose);
for(i=0; i<com.ns; i++) free(com.z[i]);
return (0);
}
/* x[]: t[ntime]; rgene[ngene-1]; kappa; p[](NSsites); omega[];
{ alpha(for NSsites) !! alpha, rho || rK[], fK[] || rK[], MK[] }
*/
int Forestry (FILE *fout)
{
/* Reads trees one by one from com.treef and, for each tree, estimates
   parameters by maximum likelihood: sets initial values (GetInitials),
   optionally iterates (ming2/minB/minB2), then prints the tree, lnL,
   parameter estimates, SEs (if com.getSE), per-site output, and dN/dS
   detail (DetailOutput).  Writes site log-likelihoods to "lnf" and runs
   RELL (rell) when more than one tree was evaluated.  Returns 0.
   Relies heavily on globals: com, tree, nodes, frst/frst1/frub/flnf.
*/
static int times=0;
FILE *ftree, *frate=NULL;
int status=0, i,j=0,k, itree, ntree, np, iteration=1;
int pauptree=0, haslength;
double x[NP],xb[NP][2], xcom[NP-NBRANCH], lnL=0,lnL0=0, e=1e-8, tl=0, nchange=-1;
double *g=NULL, *H=NULL;
#ifdef NSSITESBandits
FILE *fM0tree;
#endif
if ((ftree=fopen(com.treef,"r"))==NULL) {
printf("\ntree file %s not found.\n", com.treef);
exit(-1);
}
GetTreeFileType(ftree, &ntree, &pauptree, 0);
if (com.alpha)
frate=(FILE*)gfopen(ratef,"w");
if (ntree>10 && com.npatt>10000 && com.print)
puts("\nlnf file may be large");
flnf=gfopen("lnf","w+");
fprintf(flnf,"%6d %6d %6d\n", ntree, com.ls, com.npatt);
/* structural fitness models (aaDist >= FIT1) need per-class codon freqs */
if(com.seqtype==1 && com.aaDist>=FIT1) {
xtoy(com.pi,pcodon0,64);
zero(paa0,20);
FOR(i,com.ncode) paa0[GeneticCode[com.icode][FROM61[i]]]+=pcodon0[i];
pcodonClass=(double*)malloc(com.ncatG*64*sizeof(double));
if(pcodonClass==NULL) error2("oom pcodonClass");
}
/* ---- loop over trees in the tree file (ntree==-1: until EOF) ---- */
for(itree=0; ntree==-1||itree<ntree; itree++,iteration=1) {
if(ReadTreeN(ftree,&haslength, &i,0,1))
{ puts("end of tree file."); break; }
printf("\nTREE # %2d\n", itree+1);
fprintf(fout,"\n\nTREE # %2d: ", itree+1);
fprintf(flnf,"\n\n%2d\n", itree+1);
if(com.print) fprintf (frst,"\n\nTREE # %2d\n", itree+1);
fprintf(frub,"\n\nTREE #%2d\n", itree+1);
/* handle fix_blength: 2 = fix branch lengths from tree, 1 = use as initials */
if (com.fix_blength==2 && !haslength) error2("no branch lengths in tree");
if (com.fix_blength>0 && !haslength) com.fix_blength=0;
if (times++==0 && com.fix_blength>0 && haslength) {
if(com.clock) puts("\nBranch lengths in tree are ignored");
else {
if(com.fix_blength==2)
puts("\nBranch lengths in tree are fixed.");
else if(com.fix_blength==1)
puts("\nBranch lengths in tree used as initials.");
if(com.fix_blength==1) {
FOR(i,tree.nnode)
if((x[nodes[i].ibranch]=nodes[i].branch)<0)
x[nodes[i].ibranch]=1e-5;
}
}
}
LASTROUND=0;
if(com.cleandata)
nchange = MPScore(com.space);
if(com.ns<40) { OutTreeN(F0,0,0); printf(" MP score: %.0f",nchange); }
OutTreeN(fout,0,0); fprintf(fout," MP score: %.0f",nchange);
if(!com.clock && nodes[tree.root].nson<=2 && com.ns>2) {
puts("\nThis is a rooted tree, without clock. Check.");
fputs("\nThis is a rooted tree. Please check!",fout);
}
/* initial parameter values; i==-1 from GetInitials means skip iteration */
GetInitials(x, &i);
np = com.np;
if(noisy>=3 && np<100) matout(F0,x,1,np);
if(i==-1) iteration = 0;
if(np>NP || np-com.ntime>NP-NBRANCH) error2("raise NP");
if(com.sspace < spaceming2(np)) {
com.sspace = spaceming2(np);
printf ("\nspace adjusted to %9u bytes\n",com.sspace);
if((com.space=(double*)realloc(com.space,com.sspace))==NULL) {
printf("\ntrying to get %d bytes for ming2", com.sspace);
error2("oom space");
}
}
printf("\nntime & nrate & np:%6d%6d%6d\n",com.ntime,com.nrate,com.np);
/*
if(itree && !finitials) for(i=0;i<np-com.ntime;i++) x[com.ntime+i] = xcom[i];
*/
if(iteration && np) {
SetxBound(np, xb);
SetxInitials (np, x, xb); /* start within the feasible region */
}
PointconPnodes ();
lnL = com.plfun (x,np);
if(noisy) {
printf("\nnp =%6d", np);
printf("\nlnL0 = %12.6f\n",-lnL);
}
/* ---- numerical optimization of the log likelihood ---- */
if(iteration && np) {
if(com.method == 1)
j = minB (noisy>2?frub:NULL, &lnL,x,xb, e, com.space);
else if (com.method==3)
j = minB2(noisy>2?frub:NULL, &lnL,x,xb, e, com.space);
else
j = ming2(noisy>2?frub:NULL,&lnL,com.plfun,NULL,x,xb, com.space,e,np);
if (j==-1 || lnL<=0 || lnL>1e7) status=-1; else status=0;
if(status) fprintf(fout,"\ncheck convergence..");
}
printf("Out..\nlnL = %12.6f\n",-lnL);
printf("%d lfun, %d EigenQcodon, %d P(t)\n",NFunCall, NEigenQ, NPMatUVRoot);
/* smooth non-time parameters across trees to use as later initials */
if (itree==0)
{ lnL0=lnL; FOR(i,np-com.ntime) xcom[i]=x[com.ntime+i]; }
else if (!j)
for (i=0; i<np-com.ntime; i++) xcom[i]=xcom[i]*.2+x[com.ntime+i]*0.8;
/* transform the optimized frequency parameters back to proportions
   (they are optimized in an unconstrained f_and_x parameterization) */
if(!LASTROUND && (com.NSsites==NSpselection||com.NSsites==NSM2aRel||com.NSsites==NSdiscrete
||com.NSsites==NSfreqs||com.NSsites==NS3normal)) {
/* transform back to p0, p1,... */
k=com.ntime+com.nrgene+com.nkappa+com.npi;
if(com.nparK) { /* HMM model for w */
k += com.ncatG;
for(i=0; i<com.ncatG; i++,k+=com.ncatG-1)
f_and_x(x+k,x+k,com.ncatG,0,0);
}
else {
j = (com.NSsites==NS3normal ? 3 : com.ncatG);
if(com.model && com.model<=NSbranch2) j=3;
f_and_x(x+k,x+k,j,0,0);
}
}
LASTROUND=1;
if(com.NSsites==NSdiscrete && com.aaDist==0 && com.model==0)
sortwM3(x);
if(com.clock) { /* move times into x[] */
for(i=0,j=!nodes[tree.root].fossil; i<tree.nnode; i++)
if(i!=tree.root && nodes[i].nson && !nodes[i].fossil)
x[j++] = nodes[i].age;
}
fprintf (fout,"\nlnL(ntime:%3d np:%3d): %13.6f %+14.6f\n",
com.ntime, np, -lnL, -lnL+lnL0);
if(com.fix_blength<2) {
OutTreeB(fout); FPN(fout);
}
/*
OutTreeB(fout); FPN(fout);
if(com.fix_blength==2) {
for(i=0; i<tree.nbranch; i++) fprintf(fout, " %8.5f", nodes[tree.branches[i][1]].branch);
FPN(fout);
}
*/
for(i=0; i<np; i++) fprintf(fout," %8.6f",x[i]);
FPN(fout); fflush(fout);
/* ---- standard errors via the Hessian (SKT 2004 approximation) ---- */
if (com.getSE) {
puts("Calculating SE's");
if(com.sspace < np*(np+1)*sizeof(double)) {
com.sspace = np*(np+1)*sizeof(double);
if((com.space=(double*)realloc(com.space,com.sspace))==NULL)
error2("oom space for SE");
}
g = com.space;
H = g + com.np;
HessianSKT2004 (x, lnL, g, H);
if(com.getSE>=2 && com.clock==0 && nodes[tree.root].nson==3) { /* g & H */
fprintf(frst2,"\n %d\n\n", com.ns);
OutTreeN(frst2, 1, 1); fprintf(frst2,"\n\n");
for(i=0; i<com.ntime; i++)
if(x[i]>0.0004 && fabs(g[i])<0.005) g[i] = 0;
for(i=0; i<com.ntime; i++) fprintf(frst2," %9.6f", x[i]); fprintf(frst2, "\n\n");
for(i=0; i<com.ntime; i++) fprintf(frst2," %9.6f", g[i]); fprintf(frst2, "\n\n");
fprintf(frst2, "\nHessian\n\n");
for(i=0; i<com.ntime; i++,FPN(frst2))
for(j=0; j<com.ntime; j++)
fprintf(frst2," %10.4g", H[i*np+j]);
fflush(frst2);
}
/* SEs are sqrt of the diagonal of the inverted negative Hessian */
for(i=0; i<np*np; i++) H[i] *= -1;
matinv(H, np, np, H+np*np);
fprintf(fout,"SEs for parameters:\n");
for(i=0; i<np; i++)
fprintf(fout," %8.6f", (H[i*np+i]>0. ? sqrt(H[i*np+i]) : -1));
FPN(fout);
}
if(com.seqtype==1 && com.ntime && com.clock==0)
fprintf(fout,"\nNote: Branch length is defined as number of nucleotide substitutions per codon (not per neucleotide site).\n");
if(com.Mgene>1) {
fprintf(fout,"Note: Branch length is defined for the first gene (site partition).\n");
fprintf(fout,"For other genes, look at \"rates for genes\".\n");
}
/* if (com.clock) SetBranch (x); */
if(com.clock && com.nbtype>1)
fputs("\nWarning: branch rates are not yet applied in tree length and branch lengths",fout);
if(AbsoluteRate)
fputs("\nNote: mutation rate is not applied to tree length. Tree has times, for TreeView",fout);
for(i=0,tl=0; i<tree.nnode; i++)
if(i!=tree.root) tl += nodes[i].branch;
fprintf(fout,"\ntree length = %9.5f%s\n",tl,com.ngene>1?" (1st gene)":"");
#ifdef NSSITESBandits
if(com.NSsites==0) {
for(i=com.ntime; i<com.np; i++) fprintf(frst1,"\t%.3f", x[i]);
fprintf(frst1,"\t%.2f\t%.3f", tl, -lnL);
fM0tree=(FILE*)gfopen("M0tree", (insmodel==0?"w":"a"));
fprintf(fM0tree, "%d %d\n", com.ns, 1);
OutTreeN(fM0tree,1,1); FPN(fM0tree);
fclose(fM0tree);
}
else {
for(i=com.ntime; i<com.np; i++) fprintf(frst1,"\t%.3f",x[i]);
fprintf(frst1,"\t%.3f",-lnL);
}
#else
for(i=0; i<com.np; i++) fprintf(frst1,"\t%.3f",x[i]);
fprintf(frst1,"\t%.3f", -lnL);
/*
fprintf(frst1,"\t%.4f", (com.ns==2 ? x[0]*2 : 0));
for(i=0; i<com.nkappa; i++) fprintf(frst1,"\t%.3f",x[com.ntime+i]);
fprintf(frst1,"\t%.4f", com.omega);
fprintf(frst1,"\t%.3f", -lnL);
*/
#endif
FPN(fout); OutTreeN(fout,0,1); FPN(fout);
FPN(fout); OutTreeN(fout,1,1); FPN(fout);
if(com.clock) {
FPN(fout); OutTreeN(fout,1,PrNodeNum); FPN(fout);
}
if(com.np-com.ntime || com.clock)
DetailOutput(fout,x, H);
if (com.seqtype==AAseq && com.model>=REVaa_0)
EigenQaa(fout, Root, U, V, x+com.ntime+com.nrgene);
if (com.NSsites)
lfunNSsites_rate(frst,x,np);
/* optional per-site output: ancestral states, site rates */
if (com.print) {
if(com.rho==0 && com.nparK==0 && com.clock<=1)
AncestralSeqs(frst,x);
if(!com.NSsites && com.plfun!=lfun)
lfunRates(frate,x,np);
}
/* re-evaluate lnL with printing suppressed (print-9 flags internal mode) */
com.print -= 9;
lnL = com.plfun(x,np);
com.print += 9;
fflush(fout); fflush(flnf); fflush(frst); fflush(frst1);
} /* for(itree) */
fclose(ftree);
if(frate) fclose(frate);
if (com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses
&& (com.seqtype==CODONseq||com.model==FromCodon))
printf("\n%s, %s.\n", com.daafile, (com.aaDist>0 ? "geometric" : "linear"));
if(com.seqtype==1 && com.aaDist>=FIT1) free(pcodonClass);
if(ntree==-1) ntree=itree;
/* RELL bootstrap comparison of trees from the stored site likelihoods */
if(ntree>1) {
rewind(flnf);
rell(flnf, fout, ntree);
}
fclose(flnf);
return (0);
}
double *PointKappa (double xcom[], int igene)
{
/* Returns a pointer to the kappa parameter(s) for gene igene inside the
   non-time parameter vector xcom[], or to com.kappa when kappa is fixed.
   The offset depends on com.Mgene (per-gene parameter blocks when
   Mgene >= 3), com.hkyREV (5 rate parameters instead of 1 kappa) and
   com.aaDist (AAClasses uses com.nOmegaType omega slots per gene).
*/
   int pos = com.nrgene;
   int nkappa = (com.hkyREV ? 5 : 1);
   int nomega = (com.aaDist==AAClasses ? com.nOmegaType : 1);

   if (com.Mgene >= 3)                 /* per-gene kappa & omega blocks */
      pos += igene*(nkappa + nomega);
   if (com.fix_kappa)
      return (&com.kappa);
   return (xcom + pos);
}
double *PointOmega (double xcom[], int igene, int inode, int isiteclass)
{
/* This points to the omega parameters in xcom[], by looking at com.model,
com.NSsites and igene. This sometimes points to com.omega or com.rK[].
This is called by SetParameters(), DetailOutput(), etc.
Difficulties in using this with lfunt() etc.
Trying to remove global variables com.pomega and com.pkappa through
PointOmega and PointKappa, but was unsuccessful when too many changes were
made at the same time. Perhaps look at this later. Note that some
variables are passed over the head from lfunt() etc. to EigenQcodon().
Ziheng Notes: 8 August 2003.
*/
/* k accumulates the offset past the rate-multiplier and kappa slots;
   each branch below adds the slots that precede omega for that model. */
int k = com.nrgene+com.nkappa, backfore;
int nka=(com.hkyREV?5:1), nw=(com.aaDist==AAClasses?com.nOmegaType:1);
if (com.seqtype!=CODONseq && com.model!=FromCodon)
error2("should not be here.");
if(com.NSsites==0 && com.model==0) { /* simple case: one ratio */
if(com.ngene<=1) {
if(com.fix_omega) return (&com.omega_fix); /* fix_omega */
else ;
}
else if(com.Mgene>=3)
k += igene*(nka + nw) + nka;
}
else if(com.NSsites==0 && com.model) { /* branch model */
/* one omega per branch label; the last label may use the fixed omega */
if (com.aaDist==0) {
if(com.fix_omega && nodes[inode].label==com.nbtype-1)
return (&com.omega_fix);
else k += (int)nodes[inode].label;
}
else if(com.aaDist==AAClasses)
k += (int)nodes[inode].label*com.nOmegaType;
}
else if (com.NSsites && com.model==0) { /* site model */
/* skip the ncatG-1 class frequencies, then the per-class parameters
   (2 for the distance models, 4/5 for the FIT1/FIT2 fitness models) */
if(com.aaDist<10)
k += com.ncatG-1+2*isiteclass;
else if(com.aaDist==FIT1)
k += com.ncatG-1+4*isiteclass;
else if(com.aaDist==FIT2)
k += com.ncatG-1+5*isiteclass;
else
return (&com.rK[isiteclass]);
}
else if (com.NSsites && com.model<=NSbranch2) { /* branch&site models A&B */
k += 2; /* skip the frequencies. */
backfore = (int)nodes[inode].label;
if(isiteclass<2)
return(&com.rK[isiteclass]);
else if(isiteclass==2) {
if(com.fix_omega && backfore)
return(&com.omega_fix);
else
k += 2 + (com.NSsites==NSpselection?0:2) + backfore;
}
}
else { /* NSbranch3: Clade models C and D */
k += com.ncatG-1; /* skip the frequencies. */
backfore = (int)nodes[inode].label;
if(isiteclass<com.ncatG-1)
return(&com.rK[isiteclass]);
else if(isiteclass == com.ncatG-1) {
if(com.fix_omega && backfore==com.nbtype-1)
return(&com.omega_fix);
else
k += 2 + (com.NSsites==NSpselection?0:2) + backfore;
}
}
return (xcom+k);
}
int sortwM3(double x[])
{
/* Reorders the site-class rates com.rK[] (and the matching frequencies
   com.freqK[]) using the permutation produced by indexing() for the
   discrete model (NSsites = NSdiscrete), then writes the reordered
   frequencies and rates back into their slots of x[].
   Assumes com.freqK[] and com.rK[] have already been initialized.
   Returns 0 on success, -1 for the unimplemented HMM case.
*/
   int i, order[NCATG];
   int pos = com.ntime + com.nrgene + com.nkappa + com.npi;
   double buf[NCATG];

   if (com.NSsites != NSdiscrete) error2("sortwM3");
   if (fabs(1 - sum(com.freqK, com.ncatG)) > 1e-6) error2("sortwM3: freqK");
   if (com.nparK) {
      puts("\asortwM3 for HMM not implemented yet..");
      return (-1);
   }

   indexing(com.rK, com.ncatG, order, 0, (int*)buf);
   /* apply the permutation to the rates, then to the frequencies */
   xtoy(com.rK, buf, com.ncatG);
   for (i = 0; i < com.ncatG; i++) com.rK[i] = buf[order[i]];
   xtoy(com.freqK, buf, com.ncatG);
   for (i = 0; i < com.ncatG; i++) com.freqK[i] = buf[order[i]];

   /* x[] stores ncatG-1 free frequencies followed by ncatG rates */
   for (i = 0; i < com.ncatG - 1; i++) x[pos + i] = com.freqK[i];
   for (i = 0; i < com.ncatG; i++) x[pos + com.ncatG - 1 + i] = com.rK[i];
   return (0);
}
void printParametersNSsites (FILE* fout, double x[])
{
/* Prints the site-class proportions (com.freqK[]) and dN/dS ratios
   (com.rK[] plus foreground omegas taken from x[]) in a layout that
   depends on the branch model: site models (model==0), branch-site
   models A/B (model<=NSbranch2), or clade models (model==NSbranch3).
   Output only; no state is modified.
*/
int i,j, k=com.ntime+com.nrgene+com.nkappa+com.npi;
double w[NBTYPE][3];
if(!com.NSsites) error2("should not be here");
fprintf(fout,"\n\ndN/dS (w) for site classes (K=%d)\n",com.ncatG);
if(com.model==0) {
/* pure site models: one p/w pair per class */
fputs("\np: ",fout); for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.freqK[i]);
fputs("\nw: ",fout); for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.rK[i]);
i = com.ncatG-1;
if(com.freqK[i] < 1e-5 && com.rK[i] > 1)
fprintf(fout,"\n(note that p[%d] is zero)\n", i);
}
else if(com.model<=NSbranch2) {
/* branch-site models A & B: classes 0, 1, 2a, 2b; the foreground omega
   for classes 2a/2b is the last element of x[] (or the fixed omega) */
fprintf(fout,"\nsite class 0 1 2a 2b");
fprintf(fout,"\nproportion ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.freqK[i]);
fprintf(fout,"\nbackground w ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %8.5f", com.rK[i%2]);
fprintf(fout,"\nforeground w ");
for(i=0; i<com.ncatG-2; i++) fprintf(fout," %8.5f", com.rK[i%2]);
for(i=0; i<2; i++) fprintf(fout," %8.5f", (com.fix_omega?com.omega_fix:x[com.np-1]));
if(com.freqK[2] < 1e-5 && com.rK[2] > 1)
fprintf(fout, "\n(note that p[2] is zero)\n");
}
else if (com.model==NSbranch3) {
/* clade models: per-branch-type omega for the last class, read from x[]
   after skipping the class frequencies and the shared w0 (and w1) */
k += com.ncatG-1 + (com.NSsites==3 && com.ncatG>2) + 1; /* freqs & w0 & w1 */
for(i=0; i<com.nbtype; i++) {
for(j=0; j<com.ncatG-1; j++)
w[i][j] = com.rK[j];
w[i][com.ncatG-1] = (i==com.nbtype-1 && com.fix_omega ? com.omega_fix : x[k++]);
}
fprintf(fout,"\nsite class ");
for(i=0; i<com.ncatG; i++) fprintf(fout," %9d", i);
fprintf(fout,"\nproportion ");
for(i=0; i<com.ncatG; i++) fprintf(fout, " %9.5f", com.freqK[i]);
for(i=0; i<com.nbtype; i++) {
fprintf(fout,"\nbranch type %d: ", i);
for(j=0; j<com.ncatG; j++) fprintf(fout," %9.5f", w[i][j]);
}
i = com.ncatG-1;
if(com.freqK[i] < 1e-5) fprintf(fout,"\n(note that p[%d] is zero)\n", i);
}
fprintf(fout, "\n");
}
/* index into the 20x20 amino acid pair matrix for the reference pair */
static int ijAAref=19*20+9;
/* reference aa pair: VI (for REVaa, REVaa_0, AAClasses to estimate Grantham)
The rate for this pair is set to 1, and other rates are relative to it.
*/
/* E1N(m,s): partial expectation E[X; X>1] of X ~ Normal(m, s^2), i.e.
   s*phi((1-m)/s) + m*(1 - Phi((1-m)/s)), used by the normal mixture models */
#define E1N(m,s) (s/sqrt(PI*2)*exp(-square((1-m)/s)/2)+m*(1-CDFNormal((1-m)/s)))
void DetailOutput (FILE *fout, double x[], double var[])
{
/* var[] is used for codon models if com.getSE=1 to calculate the variances
of dS and dN.
*/
/* Prints the estimated parameters in x[] with labels: gene rate factors,
   kappa (or the 5 hkyREV rates), codon frequency parameters and fitness
   coefficients, site-class parameters for the NSsites models, alpha/rho
   for the gamma/HMM rate models, omega classes for AAClasses, and finally
   the per-branch t, N, S, dN/dS, dN, dS table with optional dS/dN trees.
   The running index k tracks the current offset into x[]; its updates
   mirror the parameter layout used by GetInitials()/PointOmega(). */
int i,j,k=com.ntime, np=com.np,npclass, ibtype;
double om=-1,N=-1,S=0,dN=0,dS=0,dSt,dNt, mr=0, vtw[4],vSN[4], omclass[NCATG];
double phi1=0,phi2=0, t, *tdSdNb=NULL, y;
double mu[3]={0,1,2},sig[3]={-1}; /* 3normal: mu0=0 fixed. mu2 estimated */
double fb3x4[12];
fprintf(fout,"\nDetailed output identifying parameters\n");
if(com.clock) OutputTimesRates(fout, x, var);
k = com.ntime;
if (com.nrgene) {
fprintf (fout, "\nrates for %d genes:%6.0f", com.ngene, 1.);
for(i=0; i<com.nrgene; i++)
fprintf (fout, " %8.5f", x[k++]);
FPN(fout);
}
if (com.seqtype==CODONseq || com.model==FromCodon) {
/* kappa or the five hkyREV exchangeability parameters */
if (com.hkyREV) {
fprintf(fout,"a (TC) & b (TA) & c (TG) & d (CA) & e (CG): ");
FOR(i,5) fprintf(fout,"%8.5f ", x[k++]); FPN(fout);
}
else if (!com.fix_kappa && com.Mgene<=2)
fprintf(fout,"\nkappa (ts/tv) = %8.5f\n", x[k++]);
/* estimated frequency parameters (F1x4/F3x4/FMutSel variants) */
if(com.npi) {
if (com.codonf==F1x4 || com.codonf==F1x4MG || com.codonf>=FMutSel0) {
for(j=0,fb3x4[3]=1; j<3; j++) fb3x4[j] = x[k+j];
abyx(1/sum(fb3x4,4), fb3x4, 4);
fprintf(fout, "\nFrequency parameters:\n");
for(j=0;j<4;j++)
fprintf(fout, " %9.5f (%c)", fb3x4[j], BASEs[j]);
if(com.codonf==FMutSel)
for(j=0;j<4;j++)
fprintf(frst1, "\t%.4f", fb3x4[j]);
}
else if (com.codonf==F3x4 || com.codonf==F3x4MG) {
for(j=0;j<3;j++) {
xtoy(x+k+j*3, fb3x4+j*4, 3);
fb3x4[j*4+3] = 1;
abyx(1/sum(fb3x4+j*4,4), fb3x4+j*4, 4);
}
fprintf(fout, "\nCodon frequency model: %s", codonfreqs[com.codonf]);
fprintf(fout, "\nFrequency parameters:\n");
for(i=0; i<3; i++,FPN(fout)) {
fprintf(fout, "Position %d: ", i+1);
for(j=0;j<4;j++)
fprintf(fout, " %9.5f (%c)", fb3x4[i*4+j], BASEs[j]);
}
}
if(com.npi>3 || com.codonf!=FMutSel) {
fprintf(fout, "\nEquilibrium codon frequencies (evolver-style):\n");
for(j=0; j<64; j++) {
fprintf(fout," %11.8f", GeneticCode[com.icode][j]==-1?0:com.pi[FROM64[j]]);
if((j+1)%4==0) FPN(fout);
}
}
/* FMutSel0/FMutSel: print amino acid/codon fitness coefficients */
if(com.npi>3 && com.codonf>=FMutSel0) {
if(com.codonf==FMutSel0) {
fprintf(fout, "\nEquilibrium amino acid frequencies:\n");
for(j=0; j<20; j++) {
fprintf(fout," %11.8f", com.piAA[j]);
if((j+1)%10==0) FPN(fout);
}
fprintf(fout, "\nfitness for %d codons (amino acid %c has fitness 0)\n", com.ncode, AAs[19]);
i = GeneticCode[com.icode][FROM61[com.ncode-1]];
y = (i == 19 ? 0 : com.ppi[3+i]);
for(j=0; j<com.ncode; j++) {
i = GeneticCode[com.icode][FROM61[j]];
fprintf(fout," %9.6f", (i == 19 ? 0 : com.ppi[3+i])-y);
}
}
else {
fprintf(fout, "\nfitness for %d codons (GGG has fitness 0)\n", com.ncode-1);
for(j=0; j<com.ncode-1; j++)
fprintf(fout," %9.6f", com.ppi[3+j]);
}
FPN(fout);
}
k += com.npi;
if(com.codonf == FMutSel)
SelectionCoefficients(frst, com.pkappa, com.ppi, com.omega);
}
/* dN/dS by averaging over site classes.
Qfactor_NS was calculated during ML iteration and is used here..
*/
if(com.NSsites && com.model==0) {
for(j=0,dS=dN=0; j<com.ncatG; j++) {
if(com.aaDist) {
if(com.aaDist<10)
com.pomega = x+k+com.ncatG-1+2*j;
else if(com.aaDist >= FIT1) {
com.pomega = x+k+com.ncatG-1+j*(4+(com.aaDist==FIT2));
xtoy(pcodonClass+j*64, com.pi, com.ncode);
}
}
mr = -1;
EigenQcodon(2,1,&S,&dSt,&dNt,NULL,NULL,NULL, &mr, com.pkappa,com.rK[j],PMat);
/* t=1 used here, and dS & dN used later for each branch */
dS += com.freqK[j]*dSt;
dN += com.freqK[j]*dNt;
omclass[j] = dNt/dSt;
}
om = dN/dS;
dS *= Qfactor_NS;
dN *= Qfactor_NS;
N = com.ls*3 - S;
}
if(!com.fix_omega && com.NSsites==0 && com.model==0 && com.aaDist!=7 && com.Mgene<=2)
fprintf(fout,"\nomega (dN/dS) = %8.5f\n", x[k++]);
/* dN/dS rate ratios for classes */
if (com.NSsites >= NSgamma) {
/* continuous-distribution site models: print the distribution params */
fprintf(fout,"\nParameters in M%d (%s):\n ", com.NSsites, NSsitesmodels[com.NSsites]);
if(com.NSsites == NSgamma)
fprintf(fout," a=%9.5f b=%9.5f\n",x[k],x[k+1]);
else if(com.NSsites == NS2gamma)
fprintf(fout," p0 = %9.5f a0 = %9.5f b0 = %9.5f\n(p1 = %9.5f) a1 = %9.5f (b1 = %9.5f)\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+3]);
else if(com.NSsites == NSbeta)
fprintf(fout,"p = %9.5f q = %9.5f\n",x[k],x[k+1]);
else if(com.NSsites == NSbetaw)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n (p1 = %9.5f) w = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], (com.fix_omega?com.omega:x[k+3]));
else if(com.NSsites == NSbetagamma)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) a = %9.5f b = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NSbeta1gamma)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) a = %9.5f b = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NSbeta1normal)
fprintf(fout," p0 = %9.5f p = %9.5f q = %9.5f\n(p1 = %9.5f) u = %9.5f s = %9.5f\n",
x[k],x[k+1],x[k+2], 1-x[k], x[k+3],x[k+4]);
else if(com.NSsites == NS02normal)
fprintf(fout,"p0 = %9.5f p1 = %9.5f u2 = %9.5f s1 = %9.5f s2 = %9.5f\n",
x[k],x[k+1],x[k+2],x[k+3],x[k+4]);
else if(com.NSsites == NS3normal)
fprintf(fout,"p0 = %9.5f p1 = %9.5f (p2 = %9.5f)\n u2 = %9.5f s0 = %9.5f s1 = %9.5f s2 = %9.5f\n",
x[k],x[k+1], 1-x[k]-x[k+1], x[k+2],x[k+3],x[k+4],x[k+5]);
else if(com.NSsites == NSTgamma)
fprintf(fout,"alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],x[k+1],(com.fix_omega ? com.omega_fix : x[k+2]));
else if(com.NSsites == NSTinvgamma)
fprintf(fout,"alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],x[k+1],(com.fix_omega ? com.omega_fix : x[k+2]));
else if(com.NSsites == NSTgamma1)
fprintf(fout,"p0 = %9.5f (p1 = %9.5f) alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],1-x[k],x[k+1],x[k+2],(com.fix_omega ? com.omega_fix : x[k+3]));
else if(com.NSsites==NSTinvgamma1)
fprintf(fout,"p0 = %9.5f (p1 = %9.5f) alpha = %9.5f beta = %9.5f T = %9.5f\n", x[k],1-x[k],x[k+1],x[k+2],(com.fix_omega ? com.omega_fix : x[k+3]));
}
if (com.NSsites==NSdiscrete && com.aaDist) { /* structural site classes */
npclass=(com.aaDist<10 ? 2 : (com.aaDist==FIT1?4:5));
fprintf(fout,"\nParameters in each class (%d)",npclass);
fprintf(fout,"%s:\n\n",
(com.aaDist<10 ? "(b, a)" : "(a_p, p*, a_v, v*, b)"));
for(j=0,k+=com.ncatG-1; j<com.ncatG; j++,FPN(fout)) {
fprintf(fout,"%d: f=%8.5f, ",j+1,com.freqK[j]);
FOR(i,npclass) fprintf(fout,"%9.5f",x[k++]);
fprintf(fout," dN/dS = %7.5f", omclass[j]);
}
}
else if (com.NSsites && com.aaDist==0) {
printParametersNSsites(fout,x);
if (com.nparK) {
fprintf(fout,"\nTransition matrix M in HMM: M_ij=Prob(i->j):\n");
matout(fout, com.MK, com.ncatG, com.ncatG);
}
}
else if(com.aaDist && com.aaDist<=6) { /* one class (YNH98, Genetics) */
k = com.ntime+com.nrgene+com.nkappa+com.npi;
fprintf (fout,"\nb = %9.5f", x[k++]);
if (com.seqtype==CODONseq) fprintf (fout,"\na = %9.5f\n", x[k++]);
}
else if(com.aaDist && com.aaDist>=11) { /* fitness, one class */
fprintf (fout,"\nfitness model (a_p, p*, a_v, v*, (and w0 for FIT2):\n");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
FOR(i,4+(com.aaDist==FIT2)) fprintf(fout," %9.5f",x[k++]); FPN(fout);
}
else if(com.model==0 && com.NSsites==0 && !com.fix_omega && com.Mgene>2) {
/* per-gene kappa/omega listing for Mgene models */
if(!com.fix_kappa && !com.fix_omega) {
for(i=0; i<com.ngene; i++,k+=2)
fprintf(fout,"\ngene #%2d: kappa = %9.5f omega = %9.5f", i+1, x[k], x[k+1]);
}
else if(com.fix_kappa) {
for(i=0; i<com.ngene; i++,k++)
fprintf(fout,"\ngene #%2d: omega = %9.5f", i+1, x[k]);
}
else if(com.fix_omega) {
for(i=0; i<com.ngene; i++,k++)
fprintf(fout,"\ngene #%2d: kappa = %9.5f", i+1, x[k]);
}
}
}
else
k += com.nrate;
/* gamma rates-among-sites parameters */
for(j=0; j<com.nalpha; j++) {
if (!com.fix_alpha)
fprintf(fout,"\nalpha (gamma, K = %d) = %8.5f", com.ncatG,(com.alpha=x[k++]));
if(com.nalpha>1)
DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);
fprintf(fout,"\nrate: "); FOR(i,com.ncatG) fprintf(fout," %8.5f",com.rK[i]);
fprintf(fout,"\nfreq: "); FOR(i,com.ncatG) fprintf(fout," %8.5f",com.freqK[i]);
}
if (com.rho) {
if (!com.fix_rho) fprintf (fout, "rho (correlation) = %8.5f\n", x[k]);
fprintf (fout, "transition probabilities between rate categories:\n");
for(i=0;i<com.ncatG;i++,FPN(fout)) FOR(j,com.ncatG)
fprintf(fout," %8.5f",com.MK[i*com.ncatG+j]);
}
if (com.aaDist==AAClasses) {
/* omega class assignments for amino acid pairs (AAClasses model) */
if(com.model==0) {
fprintf (fout, "\nw (dN/dS) classes for amino acid pairs:\n");
for(k=0; k<com.nOmegaType; k++) {
fprintf (fout, " %9.5f: ", x[com.ntime+com.nrgene+com.nkappa+k]);
for(i=0; i<20; i++) for(j=0; j<i; j++)
if (OmegaAA[i*(i-1)/2+j]==k) fprintf(fout," %c%c", AAs[i],AAs[j]);
if (k==0) fprintf(fout, " (background ratio)");
FPN(fout);
}
/* output for bubble plot */
if(com.seqtype==1) {
for(i=0; i<20; i++) for(j=0; j<i; j++) {
y = 0;
if(AA1STEP[i*(i-1)/2+j])
y = x[com.ntime+com.nrgene+com.nkappa + OmegaAA[i*(i-1)/2+j]]; /* omega */
fprintf(frst, "%c%c %3d %3d %8.5f\n", AAs[i], AAs[j], i+1, j+1, y);
}
}
}
else {
fprintf (fout, "\nw (dN/dS) for branch-type and amino acid class:\n");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
for(i=0; i<com.nbtype; i++) {
fprintf(fout, "Branch type %d: ", i);
for(j=0; j<com.nOmegaType; j++) {
fprintf (fout, " %9.5f", x[k++]);
}
FPN(fout);
}
}
}
/* dN & dS for each branch in the tree */
if(com.seqtype==CODONseq && com.ngene==1 && (com.model==0 || com.NSsites==0)
/*||com.model==FromCodon||com.aaDist==AAClasses */){
/* tdSdNb caches t, dS, dN per branch so the dS and dN trees can be
   printed afterwards and the branch lengths restored */
tdSdNb = (double*)malloc(tree.nnode*3*sizeof(double));
if(tdSdNb==NULL) error2("oom DetailOutput");
if(com.model && com.aaDist!=AAClasses ) { /* branch models */
fprintf(fout, "\nw (dN/dS) for branches: ");
k = com.ntime+com.nrgene+com.nkappa+com.npi;
for(i=0; i<com.nOmega-1; i++)
fprintf(fout, " %7.5f", x[k+i]);
fprintf(fout, " %7.5f", (com.fix_omega ? com.omega : x[k+i]));
FPN(fout);
}
fputs("\ndN & dS for each branch\n\n",fout);
fprintf(fout,"%7s%11s%8s%8s%8s%8s%8s %5s %5s\n\n",
"branch","t","N","S","dN/dS","dN","dS","N*dN","S*dS");
for(i=0,dNt=dSt=0; i<tree.nbranch; i++) {
fprintf(fout,"%4d..%-3d ",tree.branches[i][0]+1,tree.branches[i][1]+1);
k = com.ntime+com.nrgene+com.nkappa+com.npi;
/* if(com.codonf >= FMutSel0)
com.ppi = x+com.ntime+com.nrgene+com.nkappa;
*/
t = nodes[tree.branches[i][1]].branch;
if(com.NSsites==0) {
/* pick the omega for this branch according to the branch model */
if (com.aaDist) om=-1; /* not used in EigenQcodon() */
else if (com.model==0 || com.model==FromCodon)
om = (com.fix_omega?com.omega:x[k]);
else if (com.model==NSbranchB) om = x[k+i];
else if (com.model==NSbranch2) om = nodes[tree.branches[i][1]].omega;
if(com.model && com.aaDist)
com.pomega = x + com.ntime + com.nrgene + !com.fix_kappa + com.npi
+ (int)nodes[tree.branches[i][1]].label*com.nOmegaType;
mr = 0;
EigenQcodon(2,t,&S,&dS,&dN, NULL,NULL,NULL, &mr, com.pkappa,om,PMat); /* PMat destroyed! */
dNt += dN;
dSt += dS;
if (com.aaDist) om = dN/dS;
/*
if(dS<.01/com.ls) om = -1;
else if(om==-1) om = dN/dS;
if(com.model==0) om = com.omega;
*/
N = com.ls*3-S;
if(com.model) {
tdSdNb[i] = t;
tdSdNb[tree.nnode+i] = dS;
tdSdNb[tree.nnode*2+i] = dN;
}
fprintf(fout," %7.3f %7.1f %7.1f %7.4f %7.4f %7.4f %5.1f %5.1f",
t,N,S,om,dN,dS,N*dN,S*dS);
/* fprintf(frst,"%8.1f%8.1f %9.5f%9.4f%9.4f",N,S,om,dN,dS); */
/* om not used in AAClasses model */
if(com.getSE>1&&com.fix_blength<2&&!com.clock&&com.aaDist!=AAClasses){
/* delta-method SEs for dN & dS from the (t, omega) covariances */
vtw[0] = var[i*np+i];
vtw[3] = var[k*np+k];
vtw[1] = vtw[2] = var[i*np+k];
VariancedSdN(t, om, vtw, vSN);
fprintf(fout," dN = %7.4f +- %.4f dS = %7.4f +- %.4f",
dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
fprintf(fout," (method 2)");
}
FPN(fout);
}
else if(com.model==0) { /* NSsites & other site-class models */
fprintf(fout,"%9.3f %8.1f %8.1f %8.4f %8.4f %8.4f %6.1f %6.1f\n",
t,N,S,om,dN*t,dS*t, N*dN*t,S*dS*t);
}
else { /* NSbranchsites models */
;
}
} /* for (i) */
if(com.NSsites==0) {
fprintf(fout,"\ntree length for dN: %12.4f\ntree length for dS: %12.4f\n", dNt,dSt);
fprintf(frst1,"\t%.4f\t%.4f", dNt, dSt);
}
if(com.model && com.NSsites==0) {
fprintf(fout,"\ndS tree:\n");
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[tree.nnode+i];
OutTreeN(fout,1,1);
fprintf(fout,"\ndN tree:\n");
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[tree.nnode*2+i];
OutTreeN(fout,1,1); FPN(fout);
/* revert branch lengths to the original values */
for(i=0; i<tree.nbranch; i++)
nodes[tree.branches[i][1]].branch = tdSdNb[i];
free(tdSdNb);
/* the first label is the label assigned in the tree file. The second is w ratio */
if(com.aaDist==0) {
fprintf(fout,"\nw ratios as labels for TreeView:\n");
OutTreeN(fout, 1, PrOmega); FPN(fout);
}
}
} /* if codonseqs */
FPN(fout); fflush(fout);
}
void ReadNSsitesModels(char *line)
{
/* This reads the line NSsites = 0 1 2 3 7 8 in codeml.ctl.
*/
char *pline;
int pop_digit;
if ((pline=strstr(line, "="))==NULL) error2(".ctl file error NSsites");
pline++;
for (nnsmodels=0; nnsmodels<maxNSsitesModels; nnsmodels++) {
if(sscanf(pline, "%d", &nsmodels[nnsmodels]) != 1) break;
for(pop_digit=0; ; ) {
if(isdigit(*pline)) { pline++; pop_digit=1; }
else if(isspace(*pline)) {
pline++;
if(pop_digit) break;
}
else error2(".ctl file NSsites line strange.");
}
if(nsmodels[nnsmodels]<0 || nsmodels[nnsmodels]>=maxNSsitesModels)
error2("NSsites model");
}
com.NSsites=nsmodels[0];
}
int ReadDaafiles(char *line)
{
/* Reads the amino acid distance (.daa) file names from one control-file
   line and stores them in data.daafile[], for combined clock analyses of
   multiple protein data sets (clock = 5 or 6).  Always returns 0.
*/
   int markgenes[NGENE];
   int ngmax = (com.ndata>1 ? com.ndata : NGENE);
   int ig;

   splitline(line, markgenes);
   /* stop at the first field that does not start with an alphanumeric char */
   for (ig=0; ig<ngmax && isalnum(line[markgenes[ig]]); ig++) {
      sscanf(line+markgenes[ig], "%s", data.daafile[ig]);
      printf("protein %2d uses %s\n", ig+1, data.daafile[ig]);
   }
   return(0);
}
int GetOptions (char *ctlf)
{
/* Reads the codeml control file ctlf line by line, matches each option
   keyword against optstr[], stores the value(s) into the global com
   structure, and then performs extensive consistency checks and derived
   parameter-count setup (com.nrate, com.npi, com.ncatG, ...) for the
   amino acid and codon models.  Exits via error2()/exit() on bad input.
   Returns 0 on success.
*/
int iopt, i,j, nopt=37, lline=255;
char line[255], *pline, opt[99], *comment="*#";
char *optstr[] = {"seqfile", "outfile", "treefile", "seqtype", "noisy",
"cleandata", "runmode", "method",
"clock", "TipDate", "getSE", "RateAncestor", "CodonFreq", "estFreq", "verbose",
"model", "hkyREV", "aaDist","aaRatefile",
"NSsites", "NShmm", "icode", "Mgene", "fix_kappa", "kappa",
"fix_omega", "omega", "fix_alpha", "alpha","Malpha", "ncatG",
"fix_rho", "rho", "ndata", "bootstrap", "Small_Diff", "fix_blength"};
double t;
FILE *fctl;
int ng=-1, markgenes[NGENE+99];
char *daafiles[]={"", "grantham.dat", "miyata.dat",
"g1974c.dat","g1974p.dat","g1974v.dat","g1974a.dat"};
fctl=gfopen(ctlf,"r");
if (noisy) printf ("\n\nReading options from %s..\n", ctlf);
/* main read loop: one option per line; lines whose first non-blank char
   is '*' or '#' are comments and are skipped */
for (;;) {
if (fgets(line, lline, fctl) == NULL) break;
for (i=0,t=0,pline=line; i<lline&&line[i]; i++)
if (isalnum(line[i])) { t=1; break; }
else if (strchr(comment,line[i])) break;
if (t==0) continue;
/* opt = keyword; t = first numerical value after it (if any) */
sscanf (line, "%s%*s%lf", opt,&t);
if ((pline=strstr(line, "="))==NULL)
error2("err: option file. add space around the equal sign?");
/* keywords are matched on their first 8 characters only; all entries of
   optstr[] are distinct within that prefix */
for (iopt=0; iopt<nopt; iopt++) {
if (strncmp(opt, optstr[iopt], 8)==0) {
if (noisy>=9)
printf ("\n%3d %15s | %-20s %6.2f", iopt+1,optstr[iopt],opt,t);
switch (iopt) {
case ( 0): sscanf(pline+1, "%s", com.seqf); break;
case ( 1): sscanf(pline+1, "%s", com.outf); break;
case ( 2): sscanf(pline+1, "%s", com.treef); break;
case ( 3): com.seqtype=(int)t; break;
case ( 4): noisy=(int)t; break;
case ( 5): com.cleandata=(char)t; break;
case ( 6): com.runmode=(int)t; break;
case ( 7): com.method=(int)t; break;
case ( 8): com.clock=(int)t; break;
case ( 9):
sscanf(pline+1, "%lf%lf", &com.TipDate, &com.TipDate_TimeUnit);
break;
case (10): com.getSE=(int)t; break;
case (11): com.print=(int)t; break;
case (12): com.codonf=(int)t; break;
case (13): com.npi=(int)t; break;
case (14): com.verbose=(int)t; break;
case (15): com.model=(int)t; break;
case (16): com.hkyREV=(int)t; break;
case (17): com.aaDist=(int)t; break;
case (18):
/* aaRatefile: a single file name, or one per protein for clock 5/6 */
sscanf(pline+2,"%s",com.daafile);
if(com.seqtype==2 && com.ndata>1 && (com.clock==5 || com.clock==6)) {
ReadDaafiles(pline+2);
break;
}
break;
case (19): ReadNSsitesModels(line); break;
case (20): com.nparK=(int)t; break;
case (21):
com.icode=(int)t;
/* multiple genetic codes, one per gene, for combined clock analyses */
if(com.seqtype==1 && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%d",&data.icode[j])) break;
for(j=0; j<min2(ng,com.ndata); j++) printf("%4d", data.icode[j]); FPN(F0);
}
break;
case (22): com.Mgene=(int)t; break;
case (23): com.fix_kappa=(int)t; break;
case (24):
com.kappa=t;
/* per-gene kappa values for combined clock analyses */
if(com.seqtype==1 && com.fix_kappa && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%lf",&data.kappa[j])) break;
matout(F0, data.kappa, 1, min2(ng,com.ndata));
}
break;
case (25): com.fix_omega=(int)t; break;
case (26):
com.omega=t;
/* per-gene omega values for combined clock analyses */
if(com.seqtype==1 && com.fix_omega && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j],"%lf",&data.omega[j])) break;
matout(F0, data.omega, 1, min2(ng,com.ndata));
}
break;
case (27): com.fix_alpha=(int)t; break;
case (28):
com.alpha=t;
/* per-gene alpha values for combined clock analyses */
if(com.fix_alpha && t && (com.clock==5 || com.clock==6)) {
ng = splitline (++pline, markgenes);
for(j=0; j<min2(ng,com.ndata); j++)
if(!sscanf(pline+markgenes[j], "%lf", &data.alpha[j])) break;
matout(F0, data.alpha, 1, min2(ng,com.ndata));
}
break;
case (29): com.nalpha=(int)t; break;
case (30): com.ncatG=(int)t; break;
case (31): com.fix_rho=(int)t; break;
case (32): com.rho=t; break;
case (33): com.ndata=(int)t; break;
case (34): com.bootstrap=(int)t; break;
case (35): Small_Diff=t; break;
case (36): com.fix_blength=(int)t; break;
}
break;
}
}
if (iopt==nopt)
{ printf ("\noption %s in %s not recognised\n", opt,ctlf); exit(-1); }
}
fclose (fctl);
if((com.fix_kappa || (com.fix_alpha&&com.alpha)) && (com.clock==5 || com.clock==6))
printf("Using parameters from the control file.");
if (noisy) FPN(F0);
if(com.seqtype==1 || com.model==FromCodon)
setmark_61_64 ();
/* ---- amino acid (or codon-translated) sequences: model setup ---- */
if (com.seqtype==AAseq || com.seqtype==CODON2AAseq) {
if(com.NSsites) error2("use NSsites=0 for amino acids?");
if(com.hkyREV && com.model!=FromCodon) /* REV & FromCodon not well-tested. */
error2("use hkyREV=0 for amino acids?");
com.ncode = 20;
if(com.aaDist==AAClasses)
com.nrate = com.nkappa=(com.hkyREV ? 5 : !com.fix_kappa);
switch (com.model) {
case (Poisson): case (EqualInput): case (Empirical): case (Empirical_F):
com.fix_kappa=1; com.kappa=0; com.nrate=0; break;
case (FromCodon):
com.nrate=com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
if(com.aaDist) com.nrate++;
if(com.fix_omega) error2("fix_omega = 1");
if(com.codonf) {
com.codonf=0; puts("CodonFreq=0 reset for model=6.");
}
break;
case (REVaa_0): com.fix_kappa=0; com.kappa=0; break;
case (REVaa): com.fix_kappa=0; com.kappa=0; com.nrate=189; break;
default: error2("model unavailable");
}
if(com.Mgene>2 || (com.Mgene==2 && (com.model==Fequal||com.model==2)))
error2 ("Mgene && model");
if(com.seqtype==2 && com.model!=FromCodon && com.model!=AAClasses)
{ com.fix_kappa=com.fix_omega=1; com.kappa=com.omega=0; }
}
/* ---- codon sequences: option compatibility checks and parameter counts ---- */
else if(com.seqtype==CODONseq) {
if(com.nparK)
if (com.model||com.aaDist||com.NSsites!=NSdiscrete||com.alpha||com.rho)
error2("HMM model option");
if(com.Mgene>1 && com.model) error2("Mgene & model?");
if(com.fix_kappa) {
if(com.hkyREV)
error2("can't fix kappa for the codon model you selected.");
else
com.pkappa[0] = com.kappa;
}
if(com.codonf>=FMutSel0 && com.Mgene>=2)
error2("model FMutSel + Mgene not implemented");
if(com.runmode==-2 && com.seqtype==1 && com.npi)
error2("runmode = -2 not implemented for codon models with frequencies");
if(com.hkyREV && (com.aaDist || com.Mgene>1))
error2("hkyREV with aaDist or Mgene: check options?\a");
if(com.NSsites<0 || com.NSsites>maxNSsitesModels || (com.NSsites>13 && com.NSsites<22))
error2("option NSsites.");
if(com.aaDist && com.NSsites)
error2("aaDist & NSsites don't work together");
if((com.model && com.aaDist)
&& (com.model>NSbranch2 || com.aaDist!=AAClasses))
error2("model & aaDist");
if(com.model==NSbranch3 && com.NSsites!=2 && com.NSsites!=3)
error2("clade model should have model = 3 NSsites = 2 or 3.");
if(com.aaDist && com.fix_omega)
error2("can't fix_omega for aaDist models");
com.nrate=com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
/* pi_T, pi_C, pi_A are counted as frequency parameters pi. */
if(com.codonf==0)
com.npi = 0;
if(com.codonf==FMutSel0) /* FMutSel0: pi_TCA plus 20 AA freqs. */
com.npi = 3 + (com.npi ? 20-1 : 0);
else if(com.codonf==FMutSel) /* FMutSel: pi_TCA plus 60 codon freqs. */
com.npi = 3 + (com.npi ? com.ncode-1 : 0);
else if(com.npi) {
if (com.codonf==F1x4 || com.codonf==F1x4MG) com.npi = 3;
else if (com.codonf==F3x4 || com.codonf==F3x4MG) com.npi = 9;
else if (com.codonf==Fcodon) com.npi = com.ncode-1;
}
com.nrate += com.npi;
if (com.aaDist!=AAClasses) {
if(com.fix_kappa>1) error2("fix_kappa>1, not tested."); /** ???? */
if (com.model>0 && (com.alpha || !com.fix_alpha))
error2("dN/dS ratios among branches not implemented for gamma");
if (com.model>0 && com.clock)
error2("model and clock don't work together");
if (com.fix_omega) {
com.omega_fix=com.omega;
if((com.model==0 && com.NSsites==NSdiscrete)
|| (com.model && com.NSsites && com.NSsites!=NSpselection
&&com.NSsites!=NSdiscrete && com.NSsites!=NSbetaw))
error2("\afix_omega?");
}
if (com.model>NSbranch3) error2("seqtype or model.");
/*
if (com.model==NSbranch2 && com.clock==2)
error2("NSbranch & local clock.");
*/
if (com.model==NSbranch3 && com.NSsites==NSpselection && com.ncatG!=3)
{ com.ncatG=3; puts("ncatG=3 reset."); }
if(com.kappa<0) error2("kappa..");
if (com.runmode) com.fix_blength=0;
if(com.runmode==-2 && (com.NSsites||com.alpha||com.aaDist))
error2("wrong model for pairwise comparison.\ncheck NSsites, alpha, aaDist, model etc.");
if(com.runmode>0 && com.model==2) error2("tree search & model");
if(com.aaDist && com.NSsites!=0 && com.NSsites!=NSdiscrete)
error2("NSsites && aaDist.");
if((com.NSsites || nnsmodels>1) && (com.alpha || com.fix_alpha==0))
error2("NSsites & Gamma");
if(com.seqtype==1 && (com.alpha || com.fix_alpha==0))
puts("\aGamma codon model: are you sure this is the model you want to use? ");
if(com.aaDist==0) {
if((!com.fix_omega || (com.Mgene && com.Mgene>=3)) && !com.NSsites)
com.nrate++;
}
else {
if(com.aaDist<=6) com.nrate+=2; /* a & b, PSB2000 */
else if(com.aaDist==FIT1) com.nrate+=4; /* fitness models: */
else if(com.aaDist==FIT2) com.nrate+=5; /* ap, p*, av, v*, b */
if(com.aaDist>=FIT1)
for(i=0; i<2; i++)
for(j=0;j<20;j++) AAchem[i][j] /= AAchem[i][20];
}
/* per-NSsites-model rate counts and ncatG adjustments */
if(com.NSsites) {
if(com.NSsites==NSfreqs && com.ncatG!=5)
{ puts("\nncatG changed to 5."); com.ncatG=5; }
if(com.model && com.NSsites)
if((com.model!=2 && com.model!=3)
|| (com.NSsites!=NSpselection && com.NSsites!=NSdiscrete))
error2("only NSsites=2,3 & model=2,3 are compatible.");
switch(com.NSsites) {
case (NSnneutral): com.ncatG=2; break;
case (NSpselection):
case (NSM2aRel):
com.ncatG=3; break;
case (NSbetaw): com.ncatG++; break;
case (NS02normal): com.ncatG++; break;
}
if(com.model==2) { /* branchsite models A & B */
if(com.ncatG!=3) puts("\abranch-site model: use ncatG=3 only.");
com.ncatG=4;
com.nrate += (com.NSsites==2?2:3);
}
else if(com.model==3) { /* Clade models C & D */
if(com.NSsites==NSpselection) {
com.ncatG=3; com.nrate+=3;
}
if(com.NSsites==NSdiscrete) {
if(com.ncatG!=2 && com.ncatG!=3)
error2("use 2 or 3 for ncatG for model=3?");
com.nrate += com.ncatG+1;
}
}
else if(com.NSsites==NSnneutral) {
if(!com.fix_omega) com.nrate++;
else { com.nrate++; com.omega_fix=com.omega; }
}
else if(com.NSsites==NSpselection || com.NSsites==NSM2aRel) {
if(!com.fix_omega) com.nrate+=2;
else { com.nrate++; com.omega_fix=com.omega; }
}
else if(com.NSsites==NSbetaw)
{ if(!com.fix_omega) com.nrate++; else com.omega_fix=com.omega; }
else if(com.NSsites==NSdiscrete && com.aaDist) {
if (com.aaDist<=6) com.nrate+=com.ncatG; /* a&b PSB2000 */
else { /* fitness models */
com.nrate=!com.fix_kappa+4*com.ncatG;
if(com.aaDist==FIT2) com.nrate+=com.ncatG;
}
}
else if(com.NSsites==NSdiscrete)
com.nrate+=com.ncatG; /* omega's */
else if(com.NSsites==NSTgamma || com.NSsites==NSTinvgamma) {
com.nrate += 2 + !com.fix_omega; com.ncatG=KGaussLegendreRule;
}
else if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1) {
com.nrate += 3+!com.fix_omega; com.ncatG=KGaussLegendreRule+1;
}
}
}
}
else
error2 ("seqtype..");
/* ---- final checks common to both sequence types ---- */
if(com.runmode==-2 && com.cleandata==0) {
com.cleandata=1;
if(noisy) puts("gaps are removed for pairwise comparison.");
}
if(com.method &&(com.clock||com.rho))
{ com.method=0; puts("Iteration method reset: method = 0"); }
if(com.method && com.seqtype==2 && com.model==FromCodon)
{ com.method=0; puts("\awork on method = 1 for model = 6"); }
if (com.clock && com.fix_blength==2)
error2("can't fix branch lengths under clock model.");
if (com.runmode==3 && (com.clock)) error2("runmode+clock");
if (com.aaDist<=6 && (com.seqtype==CODONseq || com.model==FromCodon))
strcpy(com.daafile, daafiles[abs(com.aaDist)]);
/* note: only the puts() below is conditional on com.rho;
   fix_rho and rho are always reset when fix_alpha && alpha==0 */
if (com.fix_alpha && com.alpha==0) {
if (com.rho) puts("rho set to 0."); com.fix_rho=1; com.rho=0;
}
if(!com.fix_alpha && com.alpha<=0)
error2("initial value alpha <= 0 for fix_alpha = 0");
if(!com.fix_rho && com.rho==0) { com.rho=0.001; puts("init rho reset"); }
if(com.alpha||com.NSsites)
{ if(com.ncatG<2 || com.ncatG>NCATG) error2("ncatG"); }
else if (com.ncatG>1) com.ncatG=1;
if(com.ndata<=0) com.ndata=1;
if(com.bootstrap && com.ndata!=1) error2("ndata=1 for bootstrap.");
return(0);
}
int testx (double x[], int np)
{
/* Bounds check used for LS branch length estimation by nls2, called only
   if(clock==0).  Returns 0 when every one of the com.ntime branch lengths
   in x[] lies within [4e-7, 99], and -1 otherwise.  np is unused.
*/
   int it;
   double lower = .4e-6, upper = 99;

   for (it=0; it<com.ntime; it++) {
      if (x[it] < lower || x[it] > upper)
         return (-1);
   }
   return (0);
}
int SetxBound (int np, double xb[][2])
{
int i=-1,j,k, K=com.ncatG;
double tb[]={4e-6,50}, tb0=1e-8, rgeneb[]={0.01,99}, rateb[]={1e-4,999};
double alphab[]={0.02,49}, betab[]={0.005,99}, omegab[]={0.0001,999};
double rhob[]={0.01,0.99}, pb[]={.00001,.99999};
SetxBoundTimes (xb);
for(i=com.ntime;i<np;i++) FOR (j,2) xb[i][j]=rateb[j];
for(i=com.ntime;i<np;i++) { xb[i][0]=rateb[0]; xb[i][1]=rateb[1]; }
for(i=0; i<com.nrgene; i++) for(j=0;j<2;j++) xb[com.ntime+i][j]=rgeneb[j];
for(i=0; i<com.nrate; i++) for(j=0;j<2;j++) xb[com.ntime+com.nrgene+i][j]=rateb[j];
k = com.ntime+com.nrgene+com.nkappa;
/* codon frequency parameters */
k += j = (com.seqtype==CODONseq && com.codonf>=FMutSel0 ? 3 : 0);
if(com.seqtype==CODONseq && com.npi>3
&& (com.codonf==Fcodon || com.codonf==FMutSel0 ||com.codonf==FMutSel)) {
for( ; j<com.npi; j++) {
xb[k][0] = -29; xb[k++][1] = 29;
}
}
/* omega parameters or those in the w distribution */
if (com.NSsites) { /* p's before w's in xb[] */
omegab[0] *= 0.01;
switch(com.NSsites) {
case(NSnneutral):
xb[k][0]=pb[0]; xb[k++][1]=pb[1]; /* p0 */
xb[k][0]=omegab[0]; xb[k++][1]=1; /* w0 < 1 */
break;
case(NSpselection): /* for p0, p1, w2 */
case(NSM2aRel): /* for p0, p1, w2 */
FOR(j,2) { xb[k][0]=-99; xb[k++][1]=99; } /* transformed p */
xb[k][0]=omegab[0]; xb[k++][1]=1; /* w0 < 1 */
if(!com.fix_omega && (com.model==0 || com.model==2)) { /* w2 > 1 */
xb[k][0] = (com.NSsites==NSpselection ? 1 : omegab[0]);
xb[k++][1] = omegab[1];
}
else if (com.model==3)
for(j=0; j<1+!com.fix_omega; j++) {
xb[k][0]=omegab[0]; xb[k++][1]=omegab[1];
}
break;
case(NSdiscrete): /* pK[] & rK[] */
if(com.model==3) { /* Clade model D */
if(com.nparK) error2("model & NSsites & nparK");
FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
FOR(j,K+1) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
}
else if(com.model==2) { /* branch-site model B */
K=3;
if(com.nparK==0)
FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
if(com.nparK)
FOR(j,K*(K-1)) { xb[k][0]=-99; xb[k++][1]=99; }
}
else { /* NSsites models M3 */
FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
}
if(com.seqtype==CODONseq && com.aaDist)
FOR(j,K) { xb[k][0]=omegab[0]; xb[k++][1]=omegab[1]; }
break;
case(NSfreqs): /* p0...pK */
FOR(j,K-1) { xb[k][0]=-99; xb[k++][1]=99; }
break;
case(NSgamma):
FOR(j,2) { xb[k][0]=alphab[0]; xb[k++][1]=alphab[1]; } break;
case(NS2gamma): /* p0, alpha1,beta1,alpha2=beta2 */
xb[k][0]=pb[0]; xb[k++][1]=pb[1];
FOR(j,3) { xb[k][0]=alphab[0]; xb[k++][1]=alphab[1]; }
break;
case(NSbeta): /* p_beta,q_beta */
FOR(j,2) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; }
break;
case(NSbetaw):
/* p0, p_beta, q_beta, w */
xb[k][0]=pb[0]; xb[k++][1]=pb[1]; /* p0 */
FOR(j,2) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p & q */
if(!com.fix_omega) { xb[k][0]=1; xb[k++][1]=omegab[1]; }
break;
case(NSbetagamma): /* p0, p_beta, q_beta, alpha, beta */
xb[k][0]=pb[0]; xb[k++][1]=pb[1]; /* p0 */
FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, a&b */
break;
case(NSbeta1gamma): /* p0, p_beta, q_beta, alpha, beta */
xb[k][0]=pb[0]; xb[k++][1]=pb[1]; /* p0 */
FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, a&b */
break;
case(NSbeta1normal): /* p0, p_beta, q_beta, mu, s */
xb[k][0]=pb[0]; xb[k++][1]=pb[1]; /* p0 */
FOR(j,4) { xb[k][0]=betab[0]; xb[k++][1]=betab[1]; } /* p&q, mu&s */
xb[k-2][0]=1; xb[k-2][1]=9; /* mu */
break;
case(NS02normal): /* p0, p1, mu2, s1, s2 */
FOR(j,2) { xb[k][0]=pb[0]; xb[k++][1]=pb[1]; } /* p0 & p1, */
FOR(j,3) { xb[k][0]=.0001; xb[k++][1]=29; } /* mu2,s1,s2 */
break;
case(NS3normal): /* p0, p1, mu2, s0, s1, s2 */
FOR(j,2) { xb[k][0]=-49; xb[k++][1]=49; } /* p0 & p1, tranformed */
FOR(j,4) { xb[k][0]=.0001; xb[k++][1]=29; } /* mu2,s0,s1,s2 */
break;
case(NSTgamma):
case(NSTinvgamma):
case(NSTgamma1):
case(NSTinvgamma1):
if(com.NSsites==NSTgamma1 || com.NSsites==NSTinvgamma1) /* p0 for G(a,b,T) */
{ xb[k][0]=0.001; xb[k++][1]=0.9999; }
/* alpha */
xb[k][0]=0.05;
if(com.NSsites==NSTinvgamma || com.NSsites==NSTinvgamma1)
xb[k][0]=1.05;
xb[k++][1]=alphab[1]; /* alpha */
xb[k][0]=0.05; xb[k++][1]=betab[1]; /* beta */
if(!com.fix_omega)
{ xb[k][0]=1; xb[k++][1]=29; } /* T */
break;
}
}
else if((com.seqtype==CODONseq||com.model==FromCodon) && com.aaDist!=AAClasses)
{ if(!com.fix_omega) { xb[k][0]=omegab[0]; xb[k][1]=omegab[1]; } }
if(com.seqtype==CODONseq && com.model)
for(j=0; j<com.nOmega-com.fix_omega; j++)
{ xb[k+j][0]=omegab[0]; xb[k+j][1]=omegab[1]; }
if (com.aaDist<0 && (com.seqtype==1||com.model==FromCodon)) {
/* linear relationship between d_ij and w_ij */
if(com.nrate != !com.fix_kappa+1+(com.seqtype==1)) error2("in Setxbound");
xb[com.ntime+com.nrgene+!com.fix_kappa][1]=1; /* 0<b<1 */
}
k=com.ntime+com.nrgene+com.nrate;
for (i=0;i<com.nalpha;i++,k++) FOR (j,2) xb[k][j]=alphab[j];
if (!com.fix_rho) FOR (j,2) xb[np-1][j]=rhob[j];
if(noisy>=3 && np<100) {
printf("\nBounds (np=%d):\n",np);
for(i=0;i<np;i++) printf(" %10.6f", xb[i][0]); FPN(F0);
for(i=0;i<np;i++) printf(" %10.6f", xb[i][1]); FPN(F0);
}
return(0);
}
void getpcodonClass(double x[], double pcodonClass[])
{
/* This uses pcodon0[], paa0[], and x[] to calculate pcodonclass[] and
com.pi[] for the fitness models.
pcodon0[] has the codon frequencies observed (codonFreq=3) or expected
(codonFreq=2 or 1 or 0) rootally. Under the fitness models, the expected
codon frequencies pcodonClass[] differs among site classes and from the
rootal pi[] (pcodon0[]).
This is called by SetParameters().
*/
int i,iclass,iaa, k, nclass=(com.NSsites==0?1:com.ncatG);
double paaClass[20], *w,fit;
if(com.seqtype!=1 || com.aaDist<FIT1) error2("getpcodonClass");
k=com.ntime+com.nrgene+!com.fix_kappa+nclass-1;
FOR(iclass, nclass) {
w=x+k+iclass*(4+(com.aaDist==FIT2));
FOR(iaa,20) {
fit = -w[0]*square(AAchem[0][iaa]-w[1])
-w[2]*square(AAchem[1][iaa]-w[3]);
paaClass[iaa]=exp(2*fit);
}
abyx(1/sum(paaClass,20), paaClass, 20);
FOR(i,com.ncode) {
iaa=GeneticCode[com.icode][FROM61[i]];
pcodonClass[iclass*64+i]=pcodon0[i]/paa0[iaa]*paaClass[iaa];
}
if(fabs(1-sum(pcodonClass+iclass*64,com.ncode))>1e-5) error2("pcodon!=1");
/*
fprintf(frst,"\nSite class %d: ",iclass+1);
matout (frst,paaClass,2, 10);
matout (frst,pcodonClass+iclass*64,16,4);
*/
}
if(nclass==1) FOR(i,com.ncode) com.pi[i]=pcodonClass[i];
}
int GetInitialsCodon (double x[])
{
/* This sets the initials and count com.np for codon models.
   Fills x[] from index com.ntime+com.nrgene onward in the order:
   kappa (or 5 hkyREV rates), codon frequency parameters, then the
   omega-type parameters appropriate for the selected branch/site model.
   Initial values are jittered with rndu() to vary between runs.
   Sets com.np to the final count k and returns 0.
*/
int k=com.ntime+com.nrgene, i,j, K=com.ncatG, nsyncodon[20];
double mr=0;
if(com.nrate) { /* either kappa, omega, or both for each gene */
if(com.Mgene<=2) {
if(com.hkyREV) {
x[k++]=.5+rndu();
for(i=0; i<4; i++) x[k++]=.1+rndu();
}
else if (!com.fix_kappa)
x[k++] = com.kappa;
/* mutation-selection models: pi_TCA then fitness parameters */
if(com.codonf==FMutSel0 || com.codonf==FMutSel) {
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[i]/(com.pf3x4[3]+.02*rndu());
if(com.npi>3 && com.codonf==FMutSel0) {
/* count synonymous codons per amino acid (used to scale AA freqs) */
for(i=0; i<20; i++) nsyncodon[i]=0;
for(i=0; i<com.ncode; i++)
nsyncodon[GeneticCode[com.icode][FROM61[i]]] ++;
for(i=0; i<20-1; i++) /* amino acid fitness, ignoring nsyncodon */
x[k++] = log((com.piAA[i]/nsyncodon[i]+.001)/(com.piAA[19]/nsyncodon[19]+.002*rndu()));
}
else if(com.npi>3 && com.codonf==FMutSel) {
for(i=0;i<com.ncode-1;i++) /* codon fitness */
x[k++] = log((com.pi[i]+.001)/(com.pi[com.ncode-1]+.002*rndu()));
}
}
/* other frequency models with estimated frequencies */
else if(com.npi) {
if(com.codonf==Fcodon)
for(i=0;i<com.ncode-1;i++) /* codon fitness */
x[k++] = log((com.pi[i]+.001)/(com.pi[com.ncode-1]+.002*rndu()));
else if(com.codonf==F1x4 || com.codonf==F1x4MG)
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[i]/(com.pf3x4[3]+.02*rndu());
else if(com.codonf==F3x4 || com.codonf==F3x4MG)
for(j=0; j<3; j++)
for(i=0;i<3;i++) /* pi_TCA */
x[k++] = com.pf3x4[j*4+i]/(com.pf3x4[j*4+3]+.02*rndu());
}
/* one-ratio (M0) omega or aaDist parameters */
if (com.NSsites==0 && com.model==0) {
if (!com.aaDist)
{ if(!com.fix_omega) x[k++]=com.omega; }
else if (com.aaDist==AAClasses)
for(i=0; i<com.nOmegaType; i++)
x[k++]=0.11+0.1*rndu();
else
{ x[k++]=0.11+0.1*rndu(); x[k++]=0.22+0.1*rndu(); }
}
}
else { /* com.Mgene==3,4 */
if(com.Mgene>=3) {
com.nrate *= com.ngene;
if(com.fix_omega) com.nrate--;
}
/* per-gene kappa/omega; when omega is fixed the last gene gets none */
for(i=0; i<com.ngene; i++) {
if(com.hkyREV)
error2("hkyREV for ngene>1. Fix me.");
if(!com.fix_kappa && !com.fix_omega)
{ x[k++] = com.kappa; x[k++] = com.omega; }
else if (com.fix_kappa)
x[k++] = com.omega;
else if (com.fix_omega) {
x[k++] = com.kappa;
if(i!=com.ngene-1) x[k++] = com.omega;
}
}
}
}
if(com.model && com.model<=NSbranch3) { /* branch models */
if (com.model==NSbranchB) {
com.nbtype = tree.nbranch;
for(i=0; i<tree.nbranch; i++)
nodes[(int)tree.branches[i][1]].label = i;
}
if(com.NSsites==0) {
/* recount com.nrate for the branch model, then jitter all rates */
com.nOmega = com.nbtype;
if(com.aaDist==0)
com.nrate = com.nkappa+!com.fix_omega+com.nbtype-1;
else if (com.aaDist==AAClasses)
com.nrate = com.nkappa + com.nOmegaType*com.nbtype;
else if (com.model==NSbranchB || com.model==NSbranch2)
com.nrate += (com.model==NSbranchB ? tree.nbranch : com.nOmega-1+!com.fix_omega);
k = com.ntime+com.nrgene;
for(i=0; i<com.nrate; i++)
x[k++] = com.omega * (0.8+0.4*rndu());
}
}
if (com.NSsites==0 && com.nrate==0)
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, (com.nkappa>1?x+com.ntime+com.nrgene:&com.kappa), com.omega,PMat);
/* branch-site and clade models
com.nOmega=2 different w's at a site (three w's in the model: w0,w1,w2) */
if(com.model && com.NSsites) {
if(com.model==NSbranch2) { /* branch-site models A & B */
com.ncatG=4; K=3;
if(com.NSsites==NSdiscrete)
com.nrate = com.nkappa +com.npi + 2 +!com.fix_omega+com.nbtype-1-1; /* add w0 and w1 */
else
com.nrate = com.nkappa +com.npi +1+!com.fix_omega+com.nbtype-1-1;
}
/* add p0 and p1. check that this works for NSbranch2 */
k = com.ntime+com.nrgene+com.nkappa+com.npi;
if(com.model<=NSbranch2) { /* branch-site models A & B */
/* p0 and p1: x[0,1]=1,0, for p[]=0.6 0.2 */
x[k++] = 1+0.5*rndu();
if(K==3) x[k++] = 0.2*rndu();
if(com.NSsites == 2) /* w0<1, w1=1 (if present) */
x[k++] = 0.2+0.1*rndu();
else if(com.NSsites == NSdiscrete) { /* w0 and w1 for model B */
x[k++] = 0.2*rndu();
if(K==3) x[k++] = 0.4+.8*rndu();
}
if(!com.fix_omega)
x[k++] = com.omega + 1 + rndu(); /* w2 */
}
else { /* NSbranch3: clade models C and D */
x[k++] = 1 + rndu();
if(com.ncatG == 3) x[k++] = .5+rndu(); /* p0 and p1 */
if(com.NSsites == NSpselection) /* w0<1, w1=1 (if present) */
x[k++] = 0.2+0.2*rndu();
else if(com.NSsites == NSdiscrete) { /* w0 and w1 */
x[k++] = 0.2+0.2*rndu();
if(com.ncatG==3) x[k++] = 0.5+.5*rndu();
}
for(i=0; i<com.nbtype-1; i++) /* additional w's */
x[k++] = com.omega*(1+0.5*rndu());
if(!com.fix_omega)
x[k++] = com.omega*(1+0.5*rndu());
}
}
else if (com.NSsites) { /* w's are counted in com.nrate */
switch(com.NSsites) {
case(NSnneutral):
x[k++]=0.5+0.4*rndu(); /* p0 for w0<1 */
x[k++]=0.1+0.5*rndu(); /* w0<1 */
break;
case(NSpselection): /* for p0, p1. w is counted in nrate. */
case(NSM2aRel):
x[k++] = 0.8+rndu(); x[k++]=.1+.5*rndu(); /* p0, p1 */
x[k++] = 0.1+0.4*rndu(); /* w0<1 */
if(!com.fix_omega) {
x[k++] = com.omega*(1+0.2*rndu()); /* w2 */
if(com.omega<1 && com.NSsites==NSpselection) {
puts("\ninitial w for M2:NSpselection reset.");
x[k-1] = 2+rndu();
}
}
break;
case(NSdiscrete):
if(com.aaDist) {
for(i=0; i<com.ncatG-1; i++) x[k++]=0.;
if(com.aaDist<=6)
for(i=0;i<com.ncatG;i++) { x[k++]=1.1; x[k++]=1.2; }
for(i=0;i<com.ncatG;i++) /* ap,p*,av,v*, and b for each site class */
FOR(j,4+(com.aaDist==FIT2)) x[k++]=rndu();
}
else if(com.nparK) { /* K*(K-1) paras in HMM of dN/dS over sites */
zero(x+k,com.ncatG*(com.ncatG-1));
k += com.ncatG*(com.ncatG-1);
}
else { /* p0...pK. Note that w's are counted in nrate */
for(i=0;i<com.ncatG-1;i++) x[k++]=rndu();
for(i=0;i<com.ncatG;i++)
x[k++]=com.omega * (.5+i*2./com.ncatG*(0.8+0.4*rndu()));
}
break;
case(NSfreqs): /* p0...pK. w's are fixed */
/* NOTE(review): this uses j, which still holds a stale value from
   earlier code paths — i was very likely intended; confirm against
   the upstream PAML sources before changing. */
for(i=0;i<com.ncatG-1;i++) x[k++]=(com.ncatG-j)/2.;
break;
case(NSgamma): x[k++]=1.1; x[k++]=1.1; break;
case(NS2gamma): /* p0, alpha1,beta1,alpha2=beta2 */
x[k++]=0.5; FOR(j,3) x[k++]=2*rndu()+j*0.1; break;
case(NSbeta): /* p_beta,q_beta */
x[k++]=.2+rndu(); x[k++]=1+rndu(); break;
case(NSbetaw):
/* p0, p_beta, q_beta. w is counted in nrate. */
x[k++]=.9; x[k++]=.2+rndu(); x[k++]=1+rndu();
if(!com.fix_omega) {
x[k++]=com.omega;
if(com.omega<1) {
puts("\ninitial w for M8:NSbetaw>1 reset.");
x[k-1]=2+rndu();
}
}
break;
case(NSbetagamma): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.9; x[k++]=.4; x[k++]=1.2; x[k++]=1.1; x[k++]=1.1;
break;
case(NSbeta1gamma): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.9; x[k++]=.4; x[k++]=1.2; x[k++]=.1; x[k++]=1.1;
break;
case(NSbeta1normal): /* p0, p_beta, q_beta, alpha, beta */
x[k++]=.95; x[k++]=.4; x[k++]=1.2; x[k++]=1.1; x[k++]=1.1;
break;
case(NS02normal): /* p0, p1, mu2, s1, s2 */
x[k++]=.8; x[k++]=0.3; /* p0 & p1, not transformed */
x[k++]=.2; /* mu2 */
x[k++]=5; x[k++]=1.1; /* s1,s2 */
break;
case(NS3normal): /* p0, p1, mu2, s0, s1, s2 */
x[k++]=.77; x[k++]=0.22; /* p0 & p1, transformed */
x[k++]=.2; /* mu2 */
x[k++]=0.5; x[k++]=5; x[k++]=1.1; /* s0,s1,s2 */
break;
case(NSTgamma): /* alpha, beta, T */
case(NSTgamma1): /* p0, alpha, beta, T */
if(com.NSsites==NSTgamma1)
x[k++]=0.8+0.2*rndu(); /* p0, not transformed */
x[k++]=2+rndu(); x[k++]=3+rndu();
if(!com.fix_omega) x[k++]=1.+rndu();
break;
case(NSTinvgamma): /* alpha, beta, T */
case(NSTinvgamma1): /* p0, alpha, beta, T */
if(com.NSsites==NSTinvgamma1)
x[k++]=0.8+0.2*rndu(); /* p0, not transformed */
x[k++]=3+rndu(); x[k++]=0.8+0.2*rndu(); /* mean = b/(a-1) */
if(!com.fix_omega) x[k++]=1.+rndu();
break;
}
} /* if(com.NSsites) */
com.np = k;
return(0);
}
int GetInitials (double x[], int* fromfile)
{
/* This caculates the number of parameters (com.np) and get initial values.
   This routine is too messy. Perhaps try to restruct the code and make
   two sections for amino acids and codons?
   com.nrate is initialised in getoptions().
   Also (re)allocates com.conP if the conditional-probability buffer needs
   to grow, selects the likelihood function com.plfun, and sets *fromfile
   when initials are read from a file via readx().
*/
static int times=0;
int i, j,k=0, naa=20;
/* NOTE(review): K is computed here but not used in the visible body. */
int K=(com.model==2&&com.NSsites?com.ncatG-1:com.ncatG);
size_t sconP_new = (size_t)(tree.nnode-com.ns)*com.ncode*com.npatt*sizeof(double);
double t;
NFunCall = NPMatUVRoot = NEigenQ = 0;
if(com.clock==ClockCombined && com.ngene<=1)
error2("Combined clock model requires mutliple genes.");
GetInitialsTimes(x);
/* choose the likelihood routine: plain, discrete-gamma, or auto-discrete-gamma */
com.plfun = (com.alpha==0 ? lfun : (com.rho==0?lfundG:lfunAdG));
if(com.NSsites) com.plfun=lfundG;
if(com.nparK) com.plfun=lfunAdG;
if(com.plfun==lfun) com.conPSiteClass=0;
if(com.method && com.fix_blength!=2 && com.plfun==lfundG) {
com.conPSiteClass=1;
sconP_new *= com.ncatG;
}
/* NOTE(review): sconP_new is size_t (unsigned), so sconP_new<0 is always
   false — this overflow guard is ineffective as written; confirm intent. */
if(com.sconP<0 || sconP_new<0) error2("data set too large.");
if(com.sconP<sconP_new) {
com.sconP = sconP_new;
printf("\n%9lu bytes for conP, adjusted\n", com.sconP);
if((com.conP=(double*)realloc(com.conP, com.sconP))==NULL)
error2("oom conP");
}
InitializeNodeScale();
/* load the amino acid distance/rate matrix once, on the first call only */
if(times++==0) {
if((com.aaDist && com.aaDist<10 && com.aaDist!=AAClasses &&
(com.seqtype==CODONseq||com.model==FromCodon)) ||
(com.seqtype==AAseq &&
(com.model==Empirical||com.model==Empirical_F||com.model>=REVaa_0))){
GetDaa(NULL,com.daa);
}
}
com.nrgene = (!com.fix_rgene)*(com.ngene-1);
for(j=0; j<com.nrgene; j++) x[com.ntime+j] = 1;
if(com.seqtype==CODONseq)
GetInitialsCodon(x);
else {
com.np = com.ntime+com.nrgene+com.nrate;
k=com.ntime+com.nrgene;
if (com.aaDist==AAClasses) {
if (!com.fix_kappa) x[k++]=com.kappa;
for(i=0; i<com.nrate-!com.fix_kappa; i++)
x[k++] = com.omega;
if (com.nOmegaType>65)
puts("\a\nget better initial values for AAclasses?");
}
else {
if (com.seqtype==AAseq) { /* AAseq */
if (com.nrate==0)
EigenQaa(NULL, Root, U, V, &t); /* once for all */
if (com.model==REVaa_0) {
/* initialize only the exchangeabilities reachable by one nt change */
for(i=0;i<naa;i++) for(j=0;j<i;j++)
if (AA1STEP[i*(i-1)/2+j] && i*naa+j!=ijAAref)
x[k++] = com.daa[i*naa+j];
}
else if (com.model==REVaa) {
for (i=1; i<naa; i++) for(j=0; j<i; j++)
if(i*naa+j != ijAAref) x[k++] = com.daa[i*naa+j];
}
else if (com.model==FromCodon) {
for(j=0; j<com.nkappa; j++) x[k++] = com.kappa;
for(j=0; j<com.nrate-com.nkappa; j++) x[k++] = com.omega;
}
}
}
}
for (i=0; i<com.nalpha; i++) x[com.np++] = com.alpha;
if (!com.fix_rho) x[com.np++] = com.rho;
if (com.rho)
AutodGamma (com.MK, com.freqK, com.rK, &t, com.alpha, com.rho,com.ncatG);
else if (com.alpha && com.fix_alpha && !com.NSsites)
DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);
/* fix_blength==-1: random starting point for all parameters */
if(com.fix_blength==-1)
for(i=0; i<com.np; i++) x[i] = (i<com.ntime ? .1+0.5*rndu() : 0.5+rndu());
*fromfile=0;
if(finitials) {
readx(x,fromfile);
/* NOTE(review): this tests the pointer fromfile rather than *fromfile —
   probably *fromfile was intended; confirm before changing. */
if(com.runmode>0 && fromfile && com.NSsites) LASTROUND=1;
}
return (0);
}
int SetPGene (int igene, int _pi, int _UVRoot, int _alpha, double x[])
{
/* xcom[] does not contain time parameters
   Note that com.piG[][] have been homogeneized if (com.Mgene==3)
   Note calculation of nr1 for (com.Mgene>=3 && com.fix_omega), as only the
   w for the last partition is fixed.
   Sets up per-gene parameters: base/codon frequencies (_pi), the rate
   matrix eigensystem (_UVRoot), and the gamma rate categories (_alpha),
   pulling gene igene's values out of x[].  Returns 0.
*/
int nr1=(com.nrate+1)/com.ngene, k=com.nrgene+(com.Mgene>=3)*igene*nr1;
double *xcom=x+com.ntime, mr=0;
if (_pi) {
xtoy (com.piG[igene],com.pi,com.ncode);
#if(defined(CODEML))
/* MG-style models keep separate position frequencies per gene */
if(com.codonf==F1x4MG || com.codonf==F3x4MG)
com.pf3x4 = com.f3x4[igene];
#endif
}
if (_UVRoot) {
if (com.seqtype==CODONseq) {
if(!com.fix_kappa) com.kappa=xcom[k++];
if(!com.fix_omega) com.omega=xcom[k++];
else
/* under Mgene>2 only the last gene's omega is fixed at omega_fix */
com.omega = (com.Mgene>2&&igene<com.ngene-1?xcom[k++]:com.omega_fix);
if (!com.NSsites)
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr,
(com.hkyREV||com.codonf==FMutSel?&xcom[com.nrgene]:&com.kappa),com.omega,PMat);
}
else
EigenQaa(NULL, Root, U, V, xcom+k);
}
if (_alpha) {
com.alpha=xcom[com.nrgene+com.nrate+igene];
DiscreteGamma (com.freqK, com.rK, com.alpha, com.alpha, com.ncatG, DGammaUseMedian);
}
return (0);
}
int SetParametersNSsites (double x[])
{
/* Unpacks the NSsites (site-class) parameters from x[] into com.freqK[]
   (class proportions) and com.rK[] (class omegas), then computes the
   rate-matrix scale factor(s) Qfactor_NS / Qfactor_NS_branch[] and, for
   branch-site and clade models, the per-omega eigen sets _UU/_VV/_Root.
   Always returns 0; calls error2() on inconsistent input.

   for NSsites and NSbranchsite models including HMM, NSclade models
   p's are before w's in x[].
   w[2][3] holds omegas; w[i][j] for fore (i=0) or back (i=1) branches
   in site class j.
   A & B: branch-site models: (model=2, NSsites=2 or 3)
   iclass
   0 1 2 3
   back w0 w1 w0 w1
   fore w0 w1 w2 w2
   C & D: clade-site models: (model=3, NSsites=2 or 3)
   (D: nbtype = 2)
   iclass
   0 1 2
   b0 w0 w1 w2
   b1 w0 w1 w3
   b2 w0 w1 w4
   ......
*/
/* k0: offset of the NSsites p & w parameters in x[], after the time,
   rate, kappa and pi parameters. */
int k0=com.ntime+com.nrgene+com.nkappa+com.npi, k=k0;
int K=com.ncatG, i,j, off;
double w[NBTYPE][3], t, S,dS,dN, spaceP2PI[NCATG*(NCATG+1)], small=1e-4;
double mr, f;
double p0=1, c,e,eT, dwy, y,z, a, b, T, C, lnGa, ww, sign, *xI=NULL, *wI=NULL; /* truncated NSsites models */
if(com.NSsites==0) error2("SetParametersNSsites : strange.");
switch(com.NSsites) {
case(NSnneutral):
/* two site classes: proportion freqK[0] with estimated rK[0], rest with w=1 */
com.freqK[0] = x[k++];
com.freqK[1] = 1-com.freqK[0];
com.rK[0] = x[k++];
com.rK[1] = 1;
break;
case(NSpselection):
case(NSM2aRel):
case(NSdiscrete):
if(com.model == NSbranch2) /* branch-site A&B (Y&N2002) */
K = com.ncatG-1;
if(com.nparK) { /* HMM models, setting up p[] & w[] */
for(j=0; j<K; j++) /* w's for site classes */
com.rK[j] = x[k++];
/* each row of the transition matrix MK has K-1 free entries in x[] */
for (i=0; i<K; i++, k+=K-1) {
if (!LASTROUND) f_and_x(x+k,com.MK+i*K,K,0,0); /* x->f */
else xtoy (x+k,com.MK+i*K,K-1);
com.MK[i*K+K-1] = 1-sum(com.MK+i*K,K-1);
}
PtoPi(com.MK, com.freqK, K, spaceP2PI);
break;
}
/* *** Note: Falling through.
This sets up p[] for NSpselection, NSdiscrete, NSfreqs
*/
case(NSfreqs):
/* class proportions: transformed (f_and_x) before the last round,
   raw frequencies (with the last one by difference) afterwards */
if (!LASTROUND) {
f_and_x(x+k,com.freqK,K,0,1); /* x->f */
k += K-1;
}
else {
for(j=0,com.freqK[K-1]=1; j<K-1; j++)
com.freqK[K-1] -= (com.freqK[j] = x[k++]);
if(com.freqK[K-1]<-small || com.freqK[K-1]>1+small) {
matout(F0, com.freqK, 1, K);
error2("freqK[]");
}
}
/* setting up w[] */
if(com.NSsites == NSfreqs) {
if(com.ncatG!=5) error2("NSfreqs, ncatG?");
com.rK[0] = 0;
com.rK[1] = 1./3;
com.rK[2] = 2./3;
com.rK[3] = 1;
com.rK[4] = 3;
}
else if(com.NSsites == NSpselection || com.NSsites == NSM2aRel) {
com.rK[0] = x[k++];
com.rK[1] = 1;
com.rK[2] = (com.fix_omega && com.model<=2 ? com.omega_fix : x[k++]);
}
else if(com.NSsites == NSdiscrete && com.aaDist == 0) {
for(j=0; j<K; j++)
com.rK[j] = x[k++];
}
if(com.model) { /* branch-site and clade models */
if(com.model == NSbranch2) { /* branch-site models */
w[0][0] = w[1][0] = com.rK[0]; /* site class 0 */
w[0][1] = w[1][1] = com.rK[1]; /* site class 1 */
w[0][2] = -1;
w[1][2] = com.rK[2];
}
else { /* clade models */
k--;
for(i=0; i<com.nbtype; i++) {
for(j=0; j<K-1; j++)
w[i][j] = com.rK[j];
w[i][K-1] = (i==com.nbtype-1 && com.fix_omega ? com.omega_fix : x[k++]);
}
}
}
break;
case(NSgamma):
case(NS2gamma):
case(NSbeta):
case(NSbetaw):
case(NSbetagamma):
case(NSbeta1gamma):
case(NSbeta1normal):
case(NS02normal):
case(NS3normal):
/* continuous w distributions: discretize into ncatG classes */
DiscreteNSsites(x+k);
break;
}
/* rK[] & freqK[] for truncated nssites models. */
if(com.NSsites>=NSTgamma && com.NSsites<=NSTinvgamma1) {
/* off==1 for the models with an extra point mass at w=1 (last class) */
off = (com.NSsites==NSTgamma1||com.NSsites==NSTinvgamma1);
if(off) {
K = com.ncatG-1;
p0 = x[k];
com.rK[K] = 1;
com.freqK[K] = 1 - p0;
}
a = x[k+off];
b = x[k+off+1];
T = (com.fix_omega ? com.omega_fix : x[k+off+2]);
K = com.ncatG-off;
lnGa = LnGamma(a);
/* C is the truncation normalizer; mr is the mean rate over (0,T) */
if(com.NSsites==NSTgamma || com.NSsites==NSTgamma1) {
C = CDFGamma(T, a, b);
mr = a/(C*b)*CDFGamma(T, a+1, b);
}
else {
C = 1 - CDFGamma(1/T, a, b);
mr = b/(C*(a-1))*( 1 - CDFGamma(1/T, a-1, b) );
}
GaussLegendreRule(&xI, &wI, K);
/* w changes monotonically from 0 to T. */
for(j=0; j<K; j++) {
/* map the symmetric Gauss-Legendre nodes to increasing order in j */
if(j<K/2) { i = K/2-1-j; sign=-1; }
else { i = j-K/2; sign=1; }
#if(0) /* linear transform */
y = sign*xI[i];
com.rK[j] = ww = (1+y)*T/2;
dwy = T/2;
#else /* exponential transform */
c = 1;
eT = exp(-c*T);
y = -sign*xI[i];
z = 1 + eT + y - y*eT;
com.rK[j] = ww = -1/c*log(z/2);
dwy = (1 - eT)/(c*z);
#endif
/* freqK[j] = density(w)/C * p0 * quadrature weight * dw/dy */
if(com.NSsites==NSTgamma || com.NSsites==NSTgamma1)
com.freqK[j] = exp( a*log(b*ww)-lnGa-b*ww )/(ww*C) * p0*wI[i]*dwy;
else
com.freqK[j] = exp( a*log(b/ww)-lnGa-b/ww ) /(ww*C) * p0*wI[i]*dwy;
}
/*
printf("\na b T lnGa=%9.5f%9.5f%9.5f %9.5f\nf & w:\n", a,b,T, lnGa);
FOR(j,com.ncatG) printf("%13.5f", com.freqK[j]); FPN(F0);
FOR(j,com.ncatG) printf("%13.5f", com.rK[j]); FPN(F0);
*/
}
/* For NSsites models, calculates Qfactor_NS, to be used in EigenQcodon().
For branch-site and clade models, calculate Qfactor_NS[] and also
UVRoot for different omega's.
*/
k = k0;
if(com.model == 0) { /* NSsites models */
if(com.aaDist==0) {
if(com.NSsites<NSTgamma || com.NSsites>NSTinvgamma1) /* mr already calculated for truncated models */
for(j=0,mr=0; j<com.ncatG; j++)
mr += com.freqK[j]*com.rK[j];
Qfactor_NS = -1;
EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &Qfactor_NS, com.pkappa, mr, PMat);
}
else {
/* aaDist models: each class has its own Q scale; average them */
for(j=0,Qfactor_NS=0; j<com.ncatG; j++) {
if(com.aaDist<10)
com.pomega = x+k+com.ncatG-1+2*j;
else if(com.aaDist >= FIT1) {
com.pomega = x+k+com.ncatG-1+j*(4+(com.aaDist==FIT2));
xtoy(pcodonClass+j*64, com.pi, com.ncode);
}
mr = -1;
EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &mr, com.pkappa, com.rK[j], PMat);
Qfactor_NS += com.freqK[j]*mr;
}
}
Qfactor_NS = 1/Qfactor_NS;
if(NFunCall==1) printf("Qfactor_NS = %.6f\n", Qfactor_NS);
}
else if (com.model == NSbranch2) { /* branch&site models */
t = com.freqK[0] + com.freqK[1];
if(t<1e-100)
error2("p0 + p1 too small for branch&site model?");
/* classes 2 & 3 split the remaining mass in the ratio p0:p1 */
com.freqK[2] = (1-t)*com.freqK[0]/t;
com.freqK[3] = (1-t)*com.freqK[1]/t;
/* calculates scale factors: background branches has two site classes
while foreground branches has 3 site classes */
for(i=0; i<2; i++) { /* i=0 back (2 site classes); i=1 fore (3 classes) */
for(j=0,mr=0; j<(i==0?2:3); j++) {
com.omega = w[i][j];
f = com.freqK[j];
if(i==0) f = com.freqK[j]/t;
else if(j==2) f = 1-t;
if(NFunCall==1) printf("branch=%d freq=%.6f w%d = %.6f\n", i,f,j,com.omega);
mr += f*com.omega;
}
Qfactor_NS_branch[i] = -1;
EigenQcodon(0,-1,&S,&dS,&dN,NULL,NULL,NULL, &Qfactor_NS_branch[i], com.pkappa, mr, PMat);
Qfactor_NS_branch[i] = 1/Qfactor_NS_branch[i];
if(NFunCall==1) printf("\t\t\tQfactor for branch %d = %.6f\n", i,Qfactor_NS_branch[i]);
}
/* calculates 3 sets of U&V&Root vectors (w0,w1,w2), for GetPMatBranch().
No EigenQcodon() calls are needed in ConditionalPNode() or minbranches().
*/
for(i=0; i<3; i++) { /* (w0,w1,w2) */
if(NFunCall==1) printf("w[%d] = %.6f\n", i, w[1][i]);
mr = 1;
EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,w[1][i],PMat);
}
}
else { /* NSbranch3: Clade models C and D */
/* calculates Qfactor_NS_branch[nbtype]: each branch has K=com.ncatG site classes */
for(i=0; i<com.nbtype; i++) {
for(j=0,mr=0; j<K; j++)
mr += com.freqK[j] * w[i][j];
Qfactor_NS_branch[i] = -1;
EigenQcodon(0,-1,NULL,NULL,NULL,NULL,NULL,NULL, &Qfactor_NS_branch[i], com.pkappa,mr,PMat);
Qfactor_NS_branch[i] = 1/Qfactor_NS_branch[i];
if(NFunCall==1) printf("\t\t\tQfactor for branch=%d = %.6f\n", i,Qfactor_NS_branch[i]);
}
/* calculates K-1+nbtype sets of U&V&Root vectors (w0,w1,w2, w3,...), for GetPMatBranch().
*/
for(i=0; i<K-1+com.nbtype; i++) {
mr = 1;
com.omega = (i < K-1 ? w[0][i] : w[i-K+1][K-1]);
EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,com.omega,PMat);
}
}
return(0);
}
int Set_UVR_BranchSite (int iclass, int branchlabel)
{
/* Selects the eigen-decomposition set (U, V, Root) matching the omega used
   for site class iclass on a branch with label branchlabel.  Branch-site
   models A & B use 3 different w's; clade models C & D use nbtype+2, with
   one stored U&V&Root set per w.  Points the globals U, V, Root at the
   chosen set and returns its index.
*/
int set = 0;
if (com.model==0 || com.NSsites==0) error2("should not be here.");
if (com.model <= NSbranch2) {
/* branch-site A & B: background alternates w0/w1; foreground adds w2 */
set = (branchlabel==0 ? iclass%2 : (iclass<=1 ? iclass : 2));
}
else {
/* clade C & D: shared classes 0..ncatG-2, then one set per branch label */
set = (iclass < com.ncatG-1 ? iclass : com.ncatG-1 + branchlabel);
}
U = _UU[set];
V = _VV[set];
Root = _Root[set];
return (set);
}
int GetCodonFreqs (void)
{
/* This is called by SetParameters() and calculates the expected base or codon frequencies
(com.pf3x4[] & com.pi[]) using the parameters under the model com.codonf.
This is used for models in which codon frequency parameters are estimated from
the data by ML. Modified from GetCodonFreqs2().
com.pi[] is modified.
The routine does not work if com.ngene>1.
*/
int n=com.ncode, i,j,k, ic,iaa,b[3];
double *ppi=com.ppi, mutbias[20], y;
if (com.codonf==Fcodon) {
/* Fcodon: free codon frequencies; last one fixed at 1 before normalizing */
for(i=0; i<n; i++)
com.pi[i] = (i==n-1 ? 1 : exp(com.ppi[i]));
abyx (1./sum(com.pi,n), com.pi, n);
return(0);
}
/* fill the 3 rows of pf3x4 (one per codon position); for models other than
   F3x4/F3x4MG the same 3 parameters are reused, so the rows are identical */
for(j=0;j<3;j++) {
xtoy(ppi, com.pf3x4+j*4, 3);
com.pf3x4[j*4+3] = 1;
abyx (1./sum(com.pf3x4+j*4,4), com.pf3x4+j*4, 4);
if(com.codonf==F3x4 || com.codonf==F3x4MG)
ppi += 3;
}
if(com.codonf==FMutSel && com.npi==3) return(0);
if ((com.codonf>=F1x4 && com.codonf<=F3x4MG) || com.npi>3) {
/* codon frequency = product of the three position-specific base freqs */
for (i=0; i<n; i++) {
ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
com.pi[i] = com.pf3x4[b[0]]*com.pf3x4[4+b[1]]*com.pf3x4[8+b[2]];
}
}
if (com.codonf==FMutSel && com.npi>3) {
for(i=0; i<n-1; i++) /* last codon has fitness 0 */
com.pi[i] *= exp(com.ppi[3+i]);
}
else if (com.codonf==FMutSel0 && com.npi>3) {
for(i=0; i<n; i++) { /* last amino acid has fitness 0 */
iaa = GeneticCode[com.icode][FROM61[i]];
if(iaa<19) com.pi[i] *= exp(com.ppi[3+iaa]);
}
for(i=0,zero(com.piAA,20); i<n; i++)
com.piAA[GeneticCode[com.icode][FROM61[i]]] += com.pi[i];
abyx (1./sum(com.piAA,20), com.piAA, 20);
}
else if (com.codonf==FMutSel0 && com.npi==3) {
/* mutbias[aa] = summed mutational frequency of the codons for each aa.
   NOTE(review): pf3x4 is indexed without the per-position *4 offset here;
   this looks safe because the three pf3x4 rows are identical for
   FMutSel0 (ppi is not advanced above) -- confirm. */
for (i=0,zero(mutbias,20); i<n; i++) {
ic=FROM61[i]; iaa = GeneticCode[com.icode][ic];
b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
mutbias[iaa] += com.pf3x4[b[0]]*com.pf3x4[b[1]]*com.pf3x4[b[2]];
}
for(i=0; i<n; i++) {
ic=FROM61[i]; iaa = GeneticCode[com.icode][ic];
b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
y = com.pf3x4[b[0]]*com.pf3x4[b[1]]*com.pf3x4[b[2]];
com.pi[i] = y/mutbias[iaa] * com.piAA[iaa];
}
y = sum(com.pi, n); /* NOTE(review): dead store -- y is unused after this */
}
/* final normalization so that com.pi sums to 1 */
abyx (1./sum(com.pi,n), com.pi, n);
return (0);
}
int SetParameters (double x[])
{
/* Set com. variables and initialize U, V, Root etc. before each calculation
of the likelihood function.
Is it a good idea to restruct this and/or Getinitials into two parts,
one for aa's and another for codons?
When (com.NSsites==NS02normal || NS3normal), p's are before w's in x[];
see CDFdN_dS().
Returns 0.  x[] is laid out as: times, rgene, kappa(s), pi, then
model-specific rates; k tracks the read position.
*/
int i,j,k, ik=0, nUVR=NBTYPE+2;
double t,w0=-1, mr=0;
if(com.clock>=5) return(0);
if(com.fix_blength<2) SetBranch(x);
if(com.np<=com.ntime) return(0);
if(com.seqtype==1 || com.model==FromCodon || com.aaDist==AAClasses) {
/* copy kappa parameter(s) and set up pi/omega pointers into x[] */
k = com.ntime+com.nrgene;
if(com.hkyREV==0) {
if(com.fix_kappa==1) { com.pkappa[0]=com.kappa; ik=1; }
else com.kappa=x[k];
}
for(i=0; i<com.nkappa; i++)
com.pkappa[ik++] = x[k++];
if(com.npi) {
com.ppi = x+com.ntime+com.nrgene+com.nkappa;
GetCodonFreqs ();
}
com.pomega = x+com.ntime+com.nrgene+com.nkappa+com.npi;
}
/* relative rates for genes/partitions */
for(j=0;j<com.nrgene;j++)
com.rgene[j+1] = x[com.ntime+j];
if(com.clock && AbsoluteRate) com.rgene[0] = x[0]; /* so that rgene are abs rates */
if(com.seqtype==1 && com.aaDist>=FIT1)
getpcodonClass(x, pcodonClass);
k=com.ntime+com.nrgene+com.nkappa+com.npi;
if (com.nrate) {
if(!com.model && !com.aaDist && !com.fix_omega && !com.NSsites)
com.omega=x[k];
if(com.seqtype==AAseq)
EigenQaa(NULL, Root, U, V, x+com.ntime+com.nrgene);
else if(com.model==0 && com.NSsites==0 && com.Mgene<=1)
/* one-omega codon model: single eigen decomposition */
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa, com.omega,PMat);
else if((com.model==NSbranchB || com.model==NSbranch2)
&& com.NSsites==0 && com.nbtype<=nUVR) {
/* branch models: one eigen set per branch type */
for(i=0; i<com.nbtype; i++) {
if(com.aaDist == AAClasses)
com.pomega = x+com.ntime+com.nrgene+com.nkappa+com.npi+i*com.nOmegaType;
else
w0 = (i==com.nOmega-1&&com.fix_omega?com.omega_fix:com.pomega[i]);
EigenQcodon(1,-1,NULL,NULL,NULL,_Root[i],_UU[i],_VV[i], &mr, com.pkappa,w0,PMat);
}
}
k = com.ntime+com.nrgene+com.nrate;
}
if (com.seqtype==CODONseq && com.NSsites)
SetParametersNSsites(x);
/* to force crash in case or error
if(com.model) com.omega=-1;
*/
/* branch models */
if(com.seqtype==CODONseq && com.model && com.NSsites==0 && com.aaDist==0) {
/* assign each node's omega from its branch label */
FOR(j,tree.nnode) {
if (j==tree.root) continue;
if (com.fix_omega && (int)nodes[j].label==com.nOmega-1)
nodes[j].omega = com.omega_fix;
else
nodes[j].omega = com.pomega[(int)nodes[j].label];
}
}
if (!com.fix_alpha && com.NSsites==0) {
com.alpha = x[k++];
if (com.fix_rho)
DiscreteGamma(com.freqK,com.rK,com.alpha,com.alpha,com.ncatG,DGammaUseMedian);
}
if (!com.fix_rho) {
/* auto-correlated gamma rates across sites */
com.rho=x[k++];
AutodGamma(com.MK, com.freqK, com.rK, &t, com.alpha, com.rho, com.ncatG);
}
return (0);
}
int DiscreteNSsites(double par[])
{
/* This discretizes the continuous distribution for dN/dS ratios among sites
and calculates freqK[] and rK[], using the median method.
par[] contains all paras in the w distribution. par[0] is the
proportion of beta if (com.NSsites==betaw), or the proportion of w=0 if
(com.NSsites=NS02normal).
This routine uses com.NSsites, com.ncatG, com.freqK, com.rK.
betaw has com.ncatG-1 site classes in the beta distribution, and 02normal
has com.ncatG-1 site classes in the mixed normal distribution.
See the function CDFdN_dS() for definitions of parameters.
Returns 0.
*/
int status=0, i,j,off, K=com.ncatG-(com.NSsites==NSbetaw || com.NSsites==NS02normal);
double xb[2]={1e-7,99}; /* bounds for omega. */
double p01=0, p, w0, lnbeta;
if(com.NSsites==NSbeta || com.NSsites==NSbetaw) xb[1]=1;
if(com.NSsites==NSbeta || com.NSsites==NSbetaw) {
off = (com.NSsites==NSbetaw); /* par[0] is proportion for beta for M8 */
lnbeta = LnGamma(par[off])+LnGamma(par[off+1])-LnGamma(par[off]+par[off+1]);
/* exact beta quantiles at the class midpoints (median method) */
for(j=0; j<K; j++) {
p = (j*2.+1)/(2.*K);
com.rK[j] = QuantileBeta(p, par[off], par[off+1], lnbeta);
}
}
else {
/* generic case: numerical quantile of CDFdN_dS at each class midpoint */
for(j=0; j<K; j++) {
p = (j*2. + 1)/(2.*K);
w0 = 0.01 + j/K; /* NOTE(review): integer division -- j/K==0 for j<K, so w0 starts at 0.01; used only as the initial guess for Quantile(), but confirm this was intended */
if(com.rK[j]) w0 = (w0 + com.rK[j])/2;
com.rK[j] = Quantile(CDFdN_dS, p, w0, par, xb); /* median */
}
}
/* equal class proportions before the spike/extra-class adjustments below */
for(j=0; j<K; j++) com.freqK[j] = 1.0/K;
if(com.NSsites==NSbetaw) {
/* M8: append the extra class with w>1 (or the fixed omega) */
if(!com.fix_omega) com.rK[com.ncatG-1] = par[3];
else com.rK[com.ncatG-1] = com.omega_fix;
com.freqK[K] = 1-par[0];
for(j=0; j<K; j++) com.freqK[j] *= par[0];
}
if(com.NSsites==NS02normal) {
for(j=K-1;j>=0;j--) /* shift to right by 1 to make room for spike at 0*/
{ com.rK[j+1]=com.rK[j]; com.freqK[j+1]=com.freqK[j]; }
com.rK[0]=0; com.freqK[0]=par[0];
for(j=1;j<K+1;j++) com.freqK[j]*=(1-par[0]);
}
if(com.NSsites>=NSgamma){
/* sanity check: rK[] from the beta should be nondecreasing */
if(!status && com.NSsites==NSbeta)
for(j=1;j<com.ncatG;j++) if(com.rK[j]+1e-7<com.rK[j-1]) status=1;
if(status) {
printf("\nwarning: DiscreteNSsites\nparameters: ");
FOR(j,(com.NSsites==7?2:4)) printf(" %12.6f", par[j]); FPN(F0);
FOR(j,com.ncatG) printf("%13.5f", com.freqK[j]); FPN(F0);
FOR(j,com.ncatG) printf("%13.5e", com.rK[j]); FPN(F0);
}
}
return(0);
}
double CDFdN_dS(double x,double p[])
{
/* This calculates the CDF of the continuous dN/dS distribution over sites,
to be used as argument to the routine Quantile(). When the distribution
has spikes, the spikes are ignored in this routine, and the scaling
is done outside this routine, for example, in DiscreteNSsites().
All parameters (par) for the w distribution are passed to this routine,
although some (p0 for the spike at 0) are not used in this routine.
Parameters are arranged in the following order:
NSgamma (2): alpha, beta
NS2gamma (4): p0, alpha1, beta1, alpha2 (=beta2)
NSbeta (2): p_beta, q_beta
NSbetaw (4): p0, p_beta, q_beta, w (if !com.fix_omega, not used here)
NSbetagamma (5): p0, p_beta, q_beta, alpha, beta
NSbeta1gamma (5): p0, p_beta, q_beta, alpha, beta (1+gamma)
NSbeta1normal (5): p0, p_beta, q_beta, mu, s (normal>1)
NS02normal (5): p0, p1, mu2, s1, s2 (s are sigma's)
NS3normal (6): p0, p1, mu2, s0, s1, s2 (s are sigma's)
Parameters p0 & p1 are transformed if (!LASTROUND)
Returns the CDF value at x, or -1 if com.NSsites matches no case.
*/
double cdf=-1;
double z, f[3],mu[3]={0,1,2},sig[3]; /* 3normal: mu0=0 fixed. mu2 estimated */
switch(com.NSsites) {
case(NSgamma): cdf=CDFGamma(x,p[0],p[1]); break;
case(NS2gamma):
cdf=p[0] *CDFGamma(x,p[1],p[2])+(1-p[0])*CDFGamma(x,p[3],p[3]); break;
case(NSbeta): cdf=CDFBeta(x,p[0],p[1],0); break;
case(NSbetaw): cdf=CDFBeta(x,p[1],p[2],0); break;
case(NSbetagamma):
cdf=p[0]*CDFBeta(x,p[1],p[2],0)+(1-p[0])*CDFGamma(x,p[3],p[4]); break;
case(NSbeta1gamma):
/* beta on (0,1) mixed with a gamma shifted to (1,inf) */
if(x<=1) cdf=p[0]*CDFBeta(x,p[1],p[2],0);
else cdf=p[0]+(1-p[0])*CDFGamma(x-1,p[3],p[4]);
break;
case(NSbeta1normal):
if(x<=1) cdf=p[0]*CDFBeta(x,p[1],p[2],0);
else {
/* normal truncated to (1,inf): normalize by P(N > 1) */
cdf=CDFNormal((p[3]-1)/p[4]);
if(cdf<1e-9) {
matout(F0,p,1,5);;
printf("PHI(%.6f)=%.6f\n",(p[3]-1)/p[4],cdf); getchar();
}
cdf=p[0]+(1-p[0])*(1- CDFNormal((p[3]-x)/p[4])/cdf);
}
break;
case(NS02normal):
/* two half-normals truncated at 0; the spike at 0 (p0) is handled by the caller */
mu[2]=p[2]; sig[1]=p[3]; sig[2]=p[4];
f[1]=p[1]; f[2]=1-f[1];
cdf = 1 - f[1]* CDFNormal(-(x-mu[1])/sig[1])/CDFNormal(mu[1]/sig[1])
- f[2]* CDFNormal(-(x-mu[2])/sig[2])/CDFNormal(mu[2]/sig[2]);
break;
case(NS3normal):
mu[2]=p[2]; sig[0]=p[3]; sig[1]=p[4]; sig[2]=p[5];
if(LASTROUND) { f[0]=p[0]; f[1]=p[1]; }
else { z=(f[0]=exp(p[0]))+(f[1]=exp(p[1]))+1; f[0]/=z; f[1]/=z;}
f[2]=1-f[0]-f[1];
cdf = 1 - f[0]* 2*CDFNormal(-x/sig[0])
- f[1]* CDFNormal(-(x-mu[1])/sig[1])/CDFNormal(mu[1]/sig[1])
- f[2]* CDFNormal(-(x-mu[2])/sig[2])/CDFNormal(mu[2]/sig[2]);
break;
}
return(cdf);
}
void GetSNphysical(double pi[], double *Sphysical, double *Nphysical, double *S4)
{
/* this calculates the synonymous and nonsynonymous sites according to the
physical-site definition (Yang 2006 Computational Molecular Evolution, Section 2.5.4).
S and N are sites per codon.
It is not clear how to deal with stop codons.
pi[] holds the codon frequencies; results are written through the three
output pointers (Sphysical + Nphysical is normalized to 3 sites per codon;
S4 is the expected proportion of four-fold degenerate third positions).
*/
int i,j,k, ic,b[3], aa0,aa1, *code=GeneticCode[com.icode];
int by[3]={16,4,1}, nstop,s,n;
double y;
for(i=0,*Sphysical=*Nphysical=*S4=0; i<com.ncode; i++) {
ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
/* no need to check the first and second positions here */
if(FourFold[b[0]][b[1]]) *S4 += pi[i];
aa0=code[ic];
/* classify all 9 single-nucleotide mutations of this codon */
for(j=0,s=n=nstop=0; j<3; j++) FOR(k,3) {
aa1 = code[ic + ((b[j]+k+1)%4 - b[j])*by[j]];
if(aa1==-1) nstop++;
else if(aa0==aa1) s++;
else n++;
}
/* s + n ~= 9 */
*Sphysical += pi[i]*s/9.*3.;
*Nphysical += pi[i]*n/9.*3.;
}
/* rescale so that S + N = 3 sites per codon */
y = (*Sphysical + *Nphysical)/3;
*Sphysical /= y; *Nphysical /= y;
}
double GetOmega (int aa1, int aa2, double omega, double pomega[])
{
/* this gets the omega (w) value under different models for EigenQcodon().
aa1 & aa2 are the two amino acids involved in the replacement; omega is
the single-omega value used when com.aaDist==0; pomega[] holds the
model-specific omega/fitness parameters otherwise.  Returns w >= 0.
*/
double w=1, fit1,fit2;
int k;
if (com.aaDist==AAClasses) {
/* look up the omega class for the (unordered) amino acid pair */
if (aa1<aa2) { k=aa2; aa2=aa1; aa1=k; }
k=aa1*(aa1-1)/2+aa2;
if (pomega[OmegaAA[k]]<0) {
/* clamp a negative estimate to 0 and warn */
if (noisy) printf("aa1 & aa2 & iw & w: %d %d %d %.5f\n",
aa1,aa2,OmegaAA[k],pomega[OmegaAA[k]]);
pomega[OmegaAA[k]]=0;
}
if (com.seqtype==AAseq && com.nrate>65 && aa1*20+aa2==ijAAref)
; /* if estimating grantham's matrix with aa sequences */
else w = pomega[OmegaAA[k]];
}
else if (com.aaDist==0) w = omega; /* NSsites==0 or >0 */
else if (com.aaDist<=6) { /* chemical properties: a & b */
w = pomega[0]*com.daa[aa1*20+aa2];
if(com.aaDist>0) w = exp(-w); /* geometric */
else w = 1-w; /* linear */
if (com.seqtype==CODONseq) w *= pomega[1];
}
else if (com.aaDist>=FIT1) { /* ap,p*,av,v* (and w0 for FIT2) */
/* quadratic fitness in the two chemical properties of each amino acid */
fit1 = -pomega[0]*square(AAchem[0][aa1]-pomega[1])
-pomega[2]*square(AAchem[1][aa1]-pomega[3]);
fit2 = -pomega[0]*square(AAchem[0][aa2]-pomega[1])
-pomega[2]*square(AAchem[1][aa2]-pomega[3]);
w = exp(-fit1-fit2);
if(com.aaDist==FIT2) w *= pomega[4];
}
return(w);
}
double GetMutationMultiplier (int i, int j, int pos, int from[3], int to[3])
{
/* This sets the mutation-bias multipliers for F1x4MG, F3x4MG, FMutSel0, FMutSel.
com.pi[], com.pf3x4[], and com.piAA[] are set correctly before this routine is called.
i & j are sense-codon indices; pos is the (single) position at which
from[] and to[] differ.  Returns the multiplier q applied to the rate.
*/
int n=com.ncode, b1,b2;
double q, eFit1, eFit2, small=min2(1e-6, 1./com.ls);
/* b1 and b2 are the 2 unchanged positions */
if (pos==0) { b1=1; b2=2; }
else if(pos==1) { b1=2; b2=0; }
else { b1=0; b2=1; }
/* divide out the frequencies of the unchanged positions (MG-style rate) */
q = 1 / (com.pf3x4[b1*4+to[b1]] * com.pf3x4[b2*4+to[b2]]);
if(com.npi && (com.codonf==FMutSel || com.codonf==FMutSel0)) {
/* relative fitness of each codon = pi / mutational expectation.
   NOTE(review): pf3x4 is indexed without the per-position *4 offset
   here; this appears safe because the three pf3x4 rows are identical
   under FMutSel/FMutSel0 (see GetCodonFreqs) -- confirm. */
eFit1 = max2(com.pi[i], small);
eFit2 = max2(com.pi[j], small);
eFit1 /= com.pf3x4[from[0]] * com.pf3x4[from[1]] * com.pf3x4[from[2]];
eFit2 /= com.pf3x4[ to[0]] * com.pf3x4[ to[1]] * com.pf3x4[to[2]];
/* fixation-probability factor S/(1-e^-S) with S = log(eFit2/eFit1);
   use the limit 1 (i.e. q/eFit2 after cancellation) when S ~ 0 */
if(fabs(eFit2-eFit1)>1e-10)
q *= (log(eFit2)-log(eFit1))/(eFit2-eFit1);
else
q /= eFit2;
}
return(q);
}
int SelectionCoefficients (FILE* fout, double kappa[], double ppi[], double omega)
{
/* This calculates the distribution of S or 2Ns under the FMutSel or FMutSel0 models.
Qsubw[] is not correct if (com.NSsites) and the results are not printed.
For every 1-step codon pair it computes the selection coefficient Ns,
mutation flux Qmut, substitution flux Qsub (and Qsubw after omega),
prints a per-pair table and histograms to fout, and appends summary
means to frst1.  Returns 0.
*/
int n=Nsensecodon, i,j,k, ic1,ic2,b1,b2;
int ndiff,pos=0,from[3],to[3];
double q, summut=0, summutp=0, sumsub=0, sumsubw=0, eF1,eF2, fb[4];
double bigS=2, sumbadmut=0,sumgoodmut=0;
double Qmut[NCODE*NCODE], Qsub[NCODE*NCODE], Qsubw[NCODE*NCODE], Ns[NCODE*NCODE], mNs=0,mNsp=0,mNsn=0;
double maxNs=0, fNsMut[50]={0}, fNsSub[50]={0}, fNsSubw[50]={0}, small=min2(1e-6, 1./com.ls);
int ncat=21;
if(com.codonf<FMutSel0)
error2("codonf incorrect");
fprintf(fout, "\nI\tJ\tij\t2Ns_IJ\tpMut_IJ\tpSub_IJ\t2Ns_JI\tpMut_JI\tpSub_JI\n\n");
fb[0]=ppi[0]; fb[1]=ppi[1]; fb[2]=ppi[2]; fb[3]=1;
for (i=0;i<n*n;i++) Qmut[i]=Qsub[i]=Qsubw[i]=0;
for (i=1; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<i; j++) {
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
/* only codon pairs differing at exactly one position */
for(k=0,ndiff=0; k<3; k++)
if(from[k]!=to[k]) { ndiff++; pos=k; }
if(ndiff!=1) continue;
q = 1;
if(com.hkyREV) { /* REV-GTR model */
b1 = min2(from[pos],to[pos]); /* b1 and b2 are changed nucleotides */
b2 = max2(from[pos],to[pos]);
if (b1==0 && b2==1) q = kappa[0]; /* TC or CT, relative to AG */
else if (b1==0 && b2==2) q = kappa[1]; /* TA or AT */
else if (b1==0 && b2==3) q = kappa[2]; /* TG or GT */
else if (b1==1 && b2==2) q = kappa[3]; /* CA or AC */
else if (b1==1 && b2==3) q = kappa[4]; /* CG or GC */
}
else { /* HKY model */
if(from[pos]+to[pos]==1 || from[pos]+to[pos]==5)
q = kappa[0];
}
/* selection coefficient from the fitness ratio of the two codons */
eF1 = max2(com.pi[i], small) / (fb[from[0]] * fb[from[1]] * fb[from[2]]);
eF2 = max2(com.pi[j], small) / (fb[ to[0]] * fb[ to[1]] * fb[to[2]]);
Ns[i*n+j] = log(eF2/eF1);
Ns[j*n+i] = -Ns[i*n+j];
if(maxNs < fabs(Ns[i*n+j])) maxNs = fabs(Ns[i*n+j]);
Qmut[i*n+j] = Qsub[i*n+j] = com.pi[i] * q * fb[ to[pos]];
Qmut[j*n+i] = Qsub[j*n+i] = com.pi[j] * q * fb[from[pos]];
if(fabs(Ns[i*n+j]) > 1e-20) { /* non-neutral mutations */
/* fixation probability factor S/(1 - e^-S) */
Qsub[i*n+j] *= Ns[i*n+j]/(1 - exp(-Ns[i*n+j]));
Qsub[j*n+i] *= Ns[j*n+i]/(1 - exp(-Ns[j*n+i]));
}
Qsubw[i*n+j] = Qsub[i*n+j];
Qsubw[j*n+i] = Qsub[j*n+i];
if(!com.NSsites && GeneticCode[com.icode][ic1] != GeneticCode[com.icode][ic2]) {
Qsubw[i*n+j] *= com.omega;
Qsubw[j*n+i] *= com.omega;
}
summut += Qmut[i*n+j] + Qmut[j*n+i];
sumsub += Qsub[i*n+j] + Qsub[j*n+i];
sumsubw += Qsubw[i*n+j] + Qsubw[j*n+i];
if(fabs(Ns[i*n+j]) > 1e-20) { /* non-neutral mutations */
summutp += (Ns[i*n+j]>0 ? Qmut[i*n+j] : Qmut[j*n+i]);
mNsp += (Ns[i*n+j]>0 ? Qmut[i*n+j]*Ns[i*n+j] : Qmut[j*n+i]*Ns[j*n+i]);
mNsn += (Ns[i*n+j]<0 ? Qmut[i*n+j]*Ns[i*n+j] : Qmut[j*n+i]*Ns[j*n+i]);
}
else { /* neutral mutation. Ns = 0 makes no contribution to mNsp & mNsn */
summutp += (Qmut[i*n+j] + Qmut[j*n+i])/2;
}
mNs += (Qmut[i*n+j]+Qmut[j*n+i])*fabs(Ns[i*n+j]);
if (fabs(Ns[i*n+j])>bigS) {
/* |Ns| above bigS: count as strongly good/bad mutations */
if (Ns[i*n+j]>0) {
sumgoodmut += Qmut[i*n+j];
sumbadmut += Qmut[j*n+i];
}
else {
sumgoodmut += Qmut[j*n+i];
sumbadmut += Qmut[i*n+j];
}
}
fprintf(fout, "%c%c%c\t", BASEs[from[0]],BASEs[from[1]],BASEs[from[2]]);
fprintf(fout, "%c%c%c\t", BASEs[ to[0]],BASEs[ to[1]],BASEs[ to[2]]);
fprintf(fout, "%c%c", BASEs[from[pos]],BASEs[to[pos]]);
fprintf(fout, "\t%.5f\t%.5f\t%.5f", Ns[i*n+j], Qmut[i*n+j], Qsub[i*n+j]);
fprintf(fout, "\t%.5f\t%.5f\t%.5f", Ns[j*n+i], Qmut[j*n+i], Qsub[j*n+i]);
if(!com.NSsites)
fprintf(fout, "\t%.5f\t%.5f", Qsubw[i*n+j], Qsubw[j*n+i]);
FPN(fout);
} /* for (j) */
} /* for (i) */
/* normalize the summary statistics by the total mutation flux */
sumgoodmut /= summut;
sumbadmut /= summut;
mNs /= summut;
mNsp /= summutp;
mNsn /= summut-summutp;
fprintf(fout, "\n\nHistograms\n2Ns\tFMut\tFSub(CodonUsage)\tFSubw(after w)\n\n");
for(i=0; i<n; i++) {
for(j=0; j<n; j++) {
if(Qmut[i*n+j] == 0) continue;
/* bin Ns into ncat equal-width bins over [-maxNs, maxNs] */
for(k=0; k<ncat-1; k++) {
if(Ns[i*n+j] < (-1 + (k+1.)*2/ncat)*maxNs) break;
}
fNsMut[k] += Qmut[i*n+j]/summut;
fNsSub[k] += Qsub[i*n+j]/sumsub;
fNsSubw[k] += Qsubw[i*n+j]/sumsubw;
}
}
for(k=0; k<ncat; k++) {
fprintf(fout, "%.5f\t%.5f\t%.5f", (-1 + (k+0.5)*2/ncat)*maxNs, fNsMut[k], fNsSub[k]);
if(!com.NSsites)
fprintf(fout, "\t%.5f", fNsSubw[k]);
FPN(fout);
}
fprintf(fout, "\nProportion of advantageous (S > 0) mutations:\n %.5f\n", summutp/summut);
fprintf(fout, "\nProportions of good & bad mutations (|S| > %.4f) among mutations:\n%.5f %.5f\n",
bigS, sumgoodmut, sumbadmut);
fprintf(fout, "\nmean |Ns| = %.5f\tmean Ns+ = %.5f\tmean Ns- = %.5f\n", mNs,mNsp,mNsn);
fprintf(frst1, "\t%.4f\t%.4f\t%.4f", mNs, mNsp, mNsn);
return(0);
}
int EigenQcodon (int mode, double blength, double *S, double *dS, double *dN,
double Root[], double U[], double V[], double *meanrate, double kappa[], double omega, double Q[])
{
/* This contructs the rate matrix Q for codon substitution and gets the eigen
values and vectors if getstats==0, or get statistics (dS & dN etc.) if
getstats==1.
The routine is also called by Qcodon2aa for mechanistic amino acid
substitution models.
Input parameters are kappa, omega and com.pi (or com.fb61).
Statistics calculated include S, dS & dN.
c0[0,1,2] and c[0,1,2] are rates for the 3 codon positions before and after
selection. c4 is for 4-fold rates. ts[3] and tv[3] are transition/
transversion rates for the three codon positions, not calculated.
mode=0: construct Q; 1: calculate UVRoot; 2:calculate statistics
*Qfactor or *meanrate:
=0 means that Q is scaled as usual;
<0 means that the scale factor will be calculated and returned
>0 the given scale factor is applied (1 means no scaling).
Note that under NSsites or branch&site models, scaling is done for all Q
matrices for the whole branch.
aaDist=FIT1 & FIT2: ap,p*,av,v*, (and w0 for FIT2)
The argument omega is used only if the model assumes one omega. For
AAClasses, com.pomega is used instead.
Returns 0.
*/
int n=Nsensecodon, i,j,k, ic1,ic2,aa1,aa2, b1,b2;
int ndiff,pos=0,from[3],to[3];
double q, mr, rs0,ra0,rs,ra, y;
double Sphysical, Nphysical, S4, dSnew, dNnew;
double d4=0, d0[3], d[3], ts[3], tv[3]; /* rates at positions and 4-fold sites */
double *pi=(com.seqtype==AAseq?com.fb61:com.pi), w=-1, piQij;
double space[NCODE*(NCODE+1)];
/* Delete this after the MutSel project. */
static int times=0;
if(mode==1) times=0;
else times++;
NEigenQ++;
if(blength>=0 && (S==NULL||dS==NULL||dN==NULL)) error2("EigenQcodon");
/* ---- build Q: off-diagonal rates for 1-step codon pairs ---- */
for (i=0;i<n*n;i++) Q[i]=0;
for (i=1; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<i; j++) {
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
for(k=0,ndiff=0; k<3; k++)
if(from[k]!=to[k]) { ndiff++; pos=k; }
if(ndiff!=1) continue;
q = 1;
if(com.hkyREV) { /* REV-GTR model */
b1 = min2(from[pos],to[pos]); /* b1 and b2 are changed nucleotides */
b2 = max2(from[pos],to[pos]);
if (b1==0 && b2==1) q = kappa[0]; /* TC or CT, relative to AG */
else if (b1==0 && b2==2) q = kappa[1]; /* TA or AT */
else if (b1==0 && b2==3) q = kappa[2]; /* TG or GT */
else if (b1==1 && b2==2) q = kappa[3]; /* CA or AC */
else if (b1==1 && b2==3) q = kappa[4]; /* CG or GC */
}
else { /* HKY model */
if(from[pos]+to[pos]==1 || from[pos]+to[pos]==5)
q = kappa[0];
}
if (com.codonf>=F1x4MG && com.codonf<=FMutSel && com.codonf!=Fcodon)
q *= GetMutationMultiplier (i, j, pos, from, to);
aa1 = GeneticCode[com.icode][ic1];
aa2 = GeneticCode[com.icode][ic2];
if(aa1 != aa2)
q *= GetOmega(aa1, aa2, omega, com.pomega);
/* time-reversible: Q[i][j] = q*pi[j], Q[j][i] = q*pi[i] */
Q[i*n+j] = q*pi[j];
Q[j*n+i] = q*pi[i];
} /* for (j) */
} /* for (i) */
/* diagonal elements and mean rate mr = -sum_i pi[i]*Q[i][i] */
for (i=0; i<n; i++)
Q[i*n+i] = -sum(Q+i*n,n);
for (i=0,mr=0; i<n; i++)
mr -= pi[i]*Q[i*n+i];
if(mode==1) { /* get Root, U, & V */
if (com.seqtype==AAseq) return (0);
eigenQREV(Q, pi, n, Root, U, V, space);
if(*meanrate>= 0) { /* apply scaling if meanrate>0 */
if(*meanrate>0)
mr = *meanrate;
/* scaling Root is equivalent to scaling Q */
for (i=0; i<n; i++)
Root[i] /= mr;
}
}
else if(mode==2) { /* get statistics */
for(i=0;i<3;i++) d[i] = d0[i] = ts[i] = tv[i]=0;
rs0 = ra0 = rs = ra = 0;
for (i=0; i<n; i++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
for(j=0; j<n; j++) {
if(i==j || Q[i*n+j]==0) continue;
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
aa1 = GeneticCode[com.icode][ic1];
aa2 = GeneticCode[com.icode][ic2];
for(k=0,ndiff=0; k<3; k++)
if(from[k] != to[k]) { ndiff++; pos=k; }
if(ndiff!=1) error2("jgl");
piQij = pi[i]*Q[i*n+j];
if(pos==2 && FourFold[to[0]][to[1]])
d4 += piQij;
if(aa1==aa2) {
/* synonymous flux */
rs += piQij;
d0[pos] += piQij;
}
else {
/* nonsynonymous flux; ra0 removes the omega factor (before selection) */
ra += piQij;
w = GetOmega(aa1, aa2, omega, com.pomega);
ra0 += piQij/w;
d0[pos] += piQij/w;
}
d[pos] += piQij;
} /* for (j) */
} /* for (i) */
if(fabs(mr-(rs+ra)) > 1e-6)
error2("mr should be = rs+ra");
rs0 = rs;
/* proportions of synonymous/nonsynonymous sites; S = rs0*3*ls */
w = (rs0+ra0); rs0 /= w; ra0 /= w; *S = rs0*3*com.ls;
if(com.NSsites==0 && blength>=0) { /* calculates dS & dN */
if(blength==0) *dS = *dN = 0;
rs /= mr;
ra /= mr;
*dS = blength*rs/(3*rs0);
*dN = blength*ra/(3*ra0);
w = (*dS>0 ? *dN/ *dS : -1);
GetSNphysical(com.pi, &Sphysical, &Nphysical, &S4);
for(i=0;i<3;i++) {
d[i] *= blength/mr;
d0[i] *= blength/mr;
}
d4 *= blength/mr/S4;
/* dN*/dS* under the physical-site definition */
dNnew = blength*ra/Nphysical;
dSnew = blength*rs/Sphysical;
if(noisy>=9) {
printf("\nd123[*] =%9.5f%9.5f%9.5f average%9.5f\n", d[0],d[1],d[2], (d[0]+d[1]+d[2])/3);
printf( " [B] =%9.5f%9.5f%9.5f average%9.5f\n", d0[0],d0[1],d0[2], (d0[0]+d0[1]+d0[2])/3);
printf("accept =%9.5f%9.5f%9.5f\n\n", d[0]/d0[0],d[1]/d0[1],d[2]/d0[2]);
printf("w =%9.5f dN =%9.5f dS =%9.5f d4 =%9.5f (%.1f four-fold sites)\n", w, *dN,*dS, d4, S4*com.ls);
printf("%12s dN*=%9.5f dS*=%9.5f S* =%7.2f N* =%7.2f\n", "", dNnew, dSnew, Sphysical*com.ls, Nphysical*com.ls);
}
/* print out dN* dS* d4 d3B */
if(com.verbose && times==1 && com.ns==2)
fprintf(frst1, "\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f",
*dN*2, *dS*2, dNnew*2, dSnew*2, d0[2]*2, d4*2);
}
else if (com.NSsites) {
*dS = rs/(rs0*3);
*dN = ra/(ra0*3);
}
}
/* report the computed scale factor when the caller requested it (<0) */
if(*meanrate<0) *meanrate = mr;
return(0);
}
int EigenQaa (FILE *fout, double Root[], double U[], double V[], double rate[])
{
/* Builds the amino acid rate matrix Q under com.model, performs the eigen
decomposition into Root, U, V (scaled so the mean rate is 1), and
optionally prints the rate matrix (fout != NULL, empirical REV models).
Codon-based AA model must use FromCodon, even if com.aaDist==AAClasses.
Returns 0.
*/
int naa=20, i,j,k;
double Q[20*20], mr=0, t=0;
double space[NCODE*NCODE*2+NCODE],*Qc=space+NCODE*NCODE, *space_pisqrt=Qc+NCODE*NCODE;
char aa3[4]="", AAratefile[96]="AAratefile.dat";
FILE *fAArate;
for(i=0; i<naa*naa; i++) Q[i]=0;
switch (com.model) {
case (Poisson) : case (EqualInput) :
fillxc (Q, 1., naa*naa); break;
case (Empirical) : case (Empirical_F):
for(i=0; i<naa; i++) for(j=0; j<i; j++)
Q[i*naa+j]=Q[j*naa+i]=com.daa[i*naa+j];
break;
case (FromCodon): /* EigenQcodon check mode value */
/* build codon Q first, then collapse it to amino acids */
EigenQcodon(0,-1,NULL,NULL,NULL,Root,U,V, &mr,
(com.hkyREV||com.codonf==FMutSel?rate:&com.kappa),com.omega,Qc);
Qcodon2aa(Qc, com.fb61, Q, space);
break;
case (REVaa_0) :
/* only 1-step amino acid exchanges are free; the reference pair is 1 */
for (i=1,k=0; i<naa; i++)
for (j=0; j<i; j++)
if (AA1STEP[i*(i-1)/2+j] && i*naa+j!=ijAAref)
Q[i*naa+j] = Q[j*naa+i] = rate[k++];
k = ijAAref;
Q[(k/naa)*naa+k%naa] = Q[(k%naa)*naa+k/naa] = 1;
break;
case (REVaa) :
for (i=0,k=0; i<naa; i++)
for (j=0; j<i; j++)
if (i*naa+j != ijAAref) Q[i*naa+j] = Q[j*naa+i] = rate[k++];
Q[ijAAref] = Q[(ijAAref%naa)*naa+(ijAAref/naa)] = 1;
break;
}
/* multiply exchangeabilities by target frequencies, then set diagonals */
for(i=0; i<naa; i++) for(j=0; j<naa; j++)
Q[i*naa+j] *= com.pi[j];
for (i=0,mr=0; i<naa; i++) {
Q[i*naa+i] = 0; /* clear before summing the row */
Q[i*naa+i] = -sum(Q+i*naa,naa);
mr -= com.pi[i]*Q[i*naa+i];
}
if (fout && com.model>=REVaa_0) {
printf("\nAA substitution rate matrix printed into %s\n", AAratefile);
fAArate=(FILE*)gfopen(AAratefile,"w");
fprintf (fout, "\n\nRate matrix (symmetrical part, Sij)\n");
/* t = average exchangeability, used to rescale printed values */
for(i=0,t=0; i<naa; i++) {
if(com.pi[i]==0) error2("EigenQaa: do this now");
for(j=0; j<i; j++)
t += Q[i*naa+j]/com.pi[j]/(naa*(naa-1)/2.);
}
for(i=0; i<naa; i++) {
fprintf (fout, "\n%-5s", getAAstr(aa3,i));
for(j=0; j<i; j++) fprintf(fout, " %8.2f", Q[i*naa+j]/t/com.pi[j]*100);
for(j=0; j<i; j++) fprintf(fAArate, " %8.2f", Q[i*naa+j]/t/com.pi[j]*100);
FPN(fAArate);
}
fputs("\n ",fout);
for(i=0; i<naa; i++)
fprintf(fout,"%5s", getAAstr(aa3,i));
FPN(fout);
fflush(fout);
matout(fAArate, com.pi, 1, naa);
for(i=0; i<naa; i++)
fprintf(fAArate,"%12s", getAAstr(aa3,i));
FPN(fAArate);
fprintf(fAArate,"\n\nNote: Amino acid rate matrix estimated from %s\n", com.seqf);
fclose(fAArate);
}
if (fout && frst1 && com.model>=REVaa_0) {
fprintf(frst1, "\nRate matrix (symmetrical part, Sij) for bubble plot\n");
for(i=0; i<naa; i++) for(j=0; j<i; j++)
fprintf(frst1, "\t%d\t%d\t%.2f\n", i+1,j+1,Q[i*naa+j]/t/com.pi[j]*100);
}
eigenQREV(Q, com.pi, naa, Root, U, V, space_pisqrt);
/* normalize so that the mean substitution rate is 1 */
for(i=0; i<naa; i++)
Root[i] = Root[i]/mr;
return (0);
}
int Qcodon2aa (double Qc[], double pic[], double Qaa[], double piaa[])
{
/* Qc -> Qaa
   Collapses the codon rate matrix Qc (with codon frequencies pic[]) into an
   amino acid rate matrix Qaa by lumping synonymous codon states of the
   time-reversible Markov chain.  Only the symmetrical part is filled, and
   the amino acid frequencies are accumulated into piaa[].
   Qaa(aai,aaj) = SUMi SUMj (piC[i]*piC[j]]*Qc[i][j]) / (piAA[i]*piAA[j])
   pic[] are constructed by assigning equal frequencies for synonymous
   codons in the routine AA2Codonf().  Returns 0.
*/
int ic, jc, a1, a2, nc=Nsensecodon, naa=20;
double wi, flux;
zero(piaa, naa);
zero(Qaa, naa*naa);
/* amino acid frequency = sum of the frequencies of its codons */
for(ic=0; ic<nc; ic++)
piaa[GeneticCode[com.icode][FROM61[ic]]] += pic[ic];
for(ic=0; ic<nc; ic++) {
a1 = GeneticCode[com.icode][FROM61[ic]];
/* conditional frequency of codon ic within amino acid a1 */
wi = (piaa[a1]==0 ? 0 : pic[ic]/piaa[a1]);
for(jc=0; jc<ic; jc++) {
a2 = GeneticCode[com.icode][FROM61[jc]];
/* skip zero-rate pairs and synonymous exchanges */
if (Qc[ic*nc+jc]==0 || a1==a2) continue;
flux = (piaa[a2]==0 ? 0 : wi*pic[jc]*Qc[ic*nc+jc]/piaa[a2]);
Qaa[a1*naa+a2] += flux;
Qaa[a2*naa+a1] += flux;
}
}
return (0);
}
// Here we start GPUing
/* TCconP: CUDA kernel mirroring the "tip && cleandata" inner loop of
   ConditionalPNode().  For each site h in [pos0,pos1) and each state j in
   [0,n), multiplies conP_d[h*n+j] by PMat_d[j*n + z_d[h]], where z_d holds
   the coded tip states.  One thread per (site,state) cell: launch with at
   least n*(pos1-pos0) total threads; excess threads are filtered by the
   bounds guard below.
   NOTE(review): if another kernel were to update conP_d concurrently, the
   read-modify-write "*=" would need an atomic (as the inline comment says). */
__global__ void TCconP(double *PMat_d, double *conP_d, char *z_d, int pos0, int pos1, int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
/* guard: the grid may overshoot the number of cells */
if(i<(n*(pos1-pos0)))
{
/* shift i into this gene's slice; now i = h*n + j with h=i/n, j=i%n */
i += pos0*n;
// if other functions in parallel with this might have to atomicMult
conP_d[i] *= PMat_d[ (i%n)*n + z_d[i/n] ];
}
}
/* TnCconP: placeholder kernel, presumably intended for the "tip && unclean
   data" branch of ConditionalPNode() (cf. TCconP above).  The body is
   intentionally empty — launching it is a no-op.  TODO: implement or remove. */
__global__ void TnCconP(double *PMat_d, double *conP_d, char *z_d, int pos0, int pos1, int n, double t)
{
}
/* ConditionalPNode: post-order (pruning-algorithm) computation of the
   conditional probability vector nodes[inode].conP for gene igene, covering
   site patterns [com.posG[igene], com.posG[igene+1]).  x[] holds the model
   parameters, consulted via GetBranchRate()/GetPMatBranch().
   Recurses into unvisited internal sons first, initializes conP (0 for a
   sampled ancestor with sequence, 1 for an internal node), then multiplies
   in each son's contribution using the branch transition matrix PMat.
   Scales conP via NodeScale() when node scaling is enabled. */
int ConditionalPNode (int inode, int igene, double x[])
{
int n=com.ncode, i,j,k,h, ison, pos0=com.posG[igene], pos1=com.posG[igene+1];
double t;
/* post-order recursion: process internal sons whose conP is not reusable */
for(i=0; i<nodes[inode].nson; i++)
if(nodes[nodes[inode].sons[i]].nson>0 && !com.oldconP[nodes[inode].sons[i]])
ConditionalPNode(nodes[inode].sons[i], igene, x);
if(inode<com.ns)
for(h=pos0*n; h<pos1*n; h++)
nodes[inode].conP[h] = 0; /* young ancestor */
else
for(h=pos0*n; h<pos1*n; h++)
nodes[inode].conP[h] = 1;
/* observed ancestral sequence with clean data: conP is an indicator vector */
if (com.cleandata && inode<com.ns)
for(h=pos0; h<pos1; h++)
nodes[inode].conP[h*n+com.z[inode][h]] = 1;
for (i=0; i<nodes[inode].nson; i++) {
ison = nodes[inode].sons[i];
/* branch length scaled by the site rate and, below, by clock/gene rates */
t = nodes[ison].branch * _rateSite;
if(com.clock<5) {
if(com.clock) t *= GetBranchRate(igene,(int)nodes[ison].label,x,NULL);
else t *= com.rgene[igene];
}
GetPMatBranch(PMat, x, t, ison);
if (nodes[ison].nson<1 && com.cleandata) { /* tip && clean */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++)
nodes[inode].conP[h*n+j] *= PMat[j*n+com.z[ison][h]];
}
else if (nodes[ison].nson<1 && !com.cleandata) { /* tip & unclean */
/* ambiguous tip state: sum PMat over the compatible states in CharaMap */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++) {
for(k=0,t=0; k<nChara[com.z[ison][h]]; k++)
t += PMat[j*n+CharaMap[com.z[ison][h]][k]];
nodes[inode].conP[h*n+j] *= t;
}
}
else { /* internal node */
for(h=pos0; h<pos1; h++)
for(j=0; j<n; j++) {
for(k=0,t=0; k<n; k++)
t += PMat[j*n+k]*nodes[ison].conP[h*n+k];
nodes[inode].conP[h*n+j] *= t;
}
}
} /* for (ison) */
if(com.NnodeScale && com.nodeScale[inode])
NodeScale(inode, pos0, pos1);
return (0);
}
int PMatJC69like (double P[], double t, int n)
{
int i;
double pii=1./n+(1.-1./n)*exp(-n/(n-1.)*t), pij=(1.-pii)/(n-1.);
for(i=0; i<n*n; i++) P[i] = pij;
for(i=0; i<n; i++) P[i*n+i] = pii;
return (0);
}
double Fcodon_3x4 (double fcodon[], double fb3x4[]);
void OutFb3x4(FILE*fout, double fb3x4[]);
void CountCodons (FILE *fout,double fcodonsg[],double fb3x4sg[],double fb4g[]);
double Fcodon_3x4(double fcodon[], double fb3x4[])
{
/* Collapses 64 codon frequencies (fcodon[]) into a 3x4 position-by-base
   table (fb3x4[], rows = codon positions, columns = T C A G), with each
   row normalized to sum to 1.  Returns 0 on success, -1 (and dumps
   fcodon) if some codon position has (near) zero total frequency.
*/
   int pos, k, status=0;
   int b[3];
   double total;

   zero(fb3x4, 12);
   for(k=0; k<64; k++) {
      b[0] = k/16;  b[1] = (k/4)%4;  b[2] = k%4;   /* the 3 bases of codon k */
      for(pos=0; pos<3; pos++)
         fb3x4[pos*4 + b[pos]] += fcodon[k];
   }
   for(pos=0; pos<3; pos++) {
      total = sum(fb3x4 + pos*4, 4);
      if(total < 1e-15) status = -1;
      abyx(1/total, fb3x4 + pos*4, 4);              /* normalize the row */
   }
   if(status)
      matout(F0, fcodon, 16, 4);
   return(status);
}
void OutFb3x4 (FILE*fout, double fb3x4[])
{
/* Prints the 3x4 codon-position-by-base frequency table to fout, one line
   per codon position, followed by the average frequencies over the three
   positions. */
   int pos, b;

   for(pos=0; pos<3; pos++) {
      fprintf(fout, "\nposition %2d:", pos+1);
      for(b=0; b<4; b++)
         fprintf(fout, "%5c:%7.5f", BASEs[b], fb3x4[pos*4+b]);
   }
   fprintf(fout, "\nAverage ");
   for(b=0; b<4; b++)
      fprintf(fout, "%5c:%7.5f", BASEs[b], (fb3x4[0*4+b]+fb3x4[1*4+b]+fb3x4[2*4+b])/3);
}
/* CountCodons: counts codon usage and prints codon-usage and 3x4 tables,
   called from InitializeCodon() (see notes there).
   Output arrays (caller-allocated):
     fcodonsg[]  codon counts, first per species then (re-zeroed) per gene,
                 with the totals over genes in row com.ngene;
     fb3x4sg[]   the matching 3x4 position-by-base tables (12 per row);
     fb4g[]      per-gene average base frequencies (4 per row).
   Codons containing any ambiguity (nb[0]*nb[1]*nb[2] > 1) are skipped in
   the counts.  Exits if a gene yields an empty 3x4 table.  Also writes the
   overall GC3 content to frst1. */
void CountCodons (FILE *fout,double fcodonsg[],double fb3x4sg[],double fb4g[])
{
/* Outputs codon counts and f3x4 tables, called from InitializeCodon(), where
more notes are found.
*/
int h, j,k, nc=NCODE, ig, wname=15, nb[3], ib[3][4], ic;
/* counts codons for output, species first, genes next */
fputs("Codon usage in sequences\n",fout);
zero(fcodonsg, com.ns*nc);
for(j=0; j<com.ns; j++) {
for(h=0; h<com.npatt; h++) {
/* decode the 3 positions; nb[k] = number of bases compatible at position k */
for(k=0; k<3; k++)
NucListall(CODONs[com.z[j][h]][k], &nb[k], ib[k]);
k = nb[0]*nb[1]*nb[2];
if(k>1) continue;        /* ambiguous codon: not counted */
ic = ib[0][0]*16+ib[1][0]*4+ib[2][0];
fcodonsg[j*nc+ic] += com.fpatt[h];
}
Fcodon_3x4(fcodonsg+j*nc, fb3x4sg+j*12);
}
printcums(fout, com.ns, fcodonsg, com.icode);
fputs("Codon position x base (3x4) table for each sequence.",fout);
for(j=0; j<com.ns; j++) {
fprintf (fout,"\n\n#%d: %-*s", j+1,wname,com.spname[j]);
OutFb3x4(fout, fb3x4sg+j*12);
}
/* second pass: recount per gene (reusing the same arrays) */
zero(fcodonsg, (com.ngene+1)*nc);
zero(fb4g, (com.ngene+1)*4);
for(ig=0; ig<com.ngene; ig++) {
for(j=0; j<com.ns; j++) {
for(h=com.posG[ig]; h<com.posG[ig+1]; h++) {
for(k=0; k<3; k++)
NucListall(CODONs[com.z[j][h]][k], &nb[k], ib[k]);
k = nb[0]*nb[1]*nb[2];
if(k>1) continue;
ic = ib[0][0]*16+ib[1][0]*4+ib[2][0];
fcodonsg[ig*nc+ic] += com.fpatt[h];
}
}
if(Fcodon_3x4(fcodonsg+ig*nc, fb3x4sg+ig*12)) {
printf("Gene %d seems empty.", ig+1);
exit(-1);
}
}
if(com.ngene>1) {
fputs("\n\nCodon usage in genes\n",fout);
printcums(fout, com.ngene, fcodonsg, com.icode);
fputs("Codon position x base (3x4) table for each gene.\n",fout);
for(ig=0; ig<com.ngene; ig++) {
fprintf (fout,"\n\nGene #%d", ig+1);
OutFb3x4(fout, fb3x4sg+ig*12);
}
}
/* totals over all genes go into row com.ngene of each table */
for(ig=0; ig<com.ngene; ig++)
for(k=0;k<nc;k++) fcodonsg[com.ngene*nc+k]+=fcodonsg[ig*nc+k];
Fcodon_3x4(fcodonsg+com.ngene*nc, fb3x4sg+com.ngene*12);
for(ig=0; ig<com.ngene+1; ig++)
for(j=0;j<3;j++) for(k=0;k<4;k++) fb4g[ig*4+k]+=fb3x4sg[ig*12+j*4+k]/3;
fputs("\n\nSums of codon usage counts",fout);
printcu(fout, fcodonsg+com.ngene*nc, com.icode);
if(!com.cleandata) fputs("\n(Ambiguity data are not used in the counts.)\n",fout);
fputs("\n\nCodon position x base (3x4) table, overall\n",fout);
OutFb3x4(fout, fb3x4sg+com.ngene*12);
{
/* GC3 = overall frequency of C + G at third codon positions, sent to frst1 */
double *fb3x4 = fb3x4sg+com.ngene*12, GC3;
GC3 = (fb3x4[0*4+1] + fb3x4[1*4+1] + fb3x4[2*4+1])/3
+ (fb3x4[0*4+3] + fb3x4[1*4+3] + fb3x4[2*4+3])/3;
fprintf(frst1, "\t%.4f", GC3);
}
}
void AddCodonFreqSeqGene (int js, int ig, double fcodon0[], double fcodon[],
double fb3x40[], double fb3x4[],
double fb40[], double fb4[]);
/* AddCodonFreqSeqGene: adds codon and nucleotide counts from sequence js in
   gene ig into fcodon[], fb3x4[], and fb4[], using the previous-round
   estimates fcodon0[], fb3x40[], fb40[] to apportion ambiguous observations
   among their compatible codons/bases.  Similar to AddFreqSeqGene().
   Called iteratively from InitializeCodon() until the frequencies converge. */
void AddCodonFreqSeqGene (int js, int ig, double fcodon0[], double fcodon[],
double fb3x40[], double fb3x4[],
double fb40[], double fb4[])
{
/* This adds codon and nucleotide counts in sequence js in gene ig to fcodon,
fb3x4, and fb4, using fcodon0, fb3x40, and fb40 to resolve ambiguities
Similar to AddFreqSeqGene().
*/
int h, k, i0,i1,i2, nc=NCODE;
int nb[3],ib[3][4],ic=-1;
double t,t1;
char str[4]=" ", codon[4]=" ", ft[64];
for(h=com.posG[ig]; h<com.posG[ig+1]; h++) {
/* nb[k]/ib[k][] = count/list of bases compatible at codon position k */
for(k=0; k<3; k++)
NucListall(CODONs[com.z[js][h]][k], &nb[k], ib[k]);
/* NOTE(review): this product is never used — k is immediately reused as
   the loop variable on the next line; dead statement. */
k = nb[0]*nb[1]*nb[2];
for(k=0; k<3; k++) { /* f3x4 & f1x4, no regard for stop codons */
/* t/t1 = total prior weight of the compatible bases at this position */
for(i0=0,t=t1=0; i0<nb[k]; i0++) {
t += fb3x40[k*4+ib[k][i0]];
t1 += fb40[ib[k][i0]];
}
for(i0=0; i0<nb[k]; i0++) {
fb3x4[k*4+ib[k][i0]] += com.fpatt[h] * fb3x40[k*4+ib[k][i0]]/t;
fb4[ib[k][i0]] += com.fpatt[h]* fb40[ib[k][i0]]/t1;
}
}
/* ft[] flags the sense codons compatible with the (possibly ambiguous)
   observation; k counts them; t sums their prior frequencies */
for(i0=0; i0<64; i0++) ft[i0]=0;
for(i0=k=0,t=0; i0<nb[0]; i0++) FOR(i1,nb[1]) FOR(i2,nb[2]) {
ic = ib[0][i0]*16+ib[1][i1]*4+ib[2][i2];
if(FROM64[ic]==-1) continue;      /* skip stop codons */
ft[ic] = 1; k++;
t += fcodon0[ic];
}
if(k==0) printf("%s in seq. %d is stop (icode=%d)\n",
getcodon(str,ic),js+1,com.icode);
if(t<1e-100)
printf("difficulty in resolving codon %s.\n", codon);
/* apportion the pattern count by prior frequency (or uniformly if t==0) */
for(ic=0; ic<nc; ic++) if(ft[ic])
fcodon[ic] += (t>0 ? com.fpatt[h]*fcodon0[ic]/t : com.fpatt[h]/k);
}
}
/* InitializeCodon: counts codons, resolves ambiguities iteratively, and sets
   up the equilibrium codon frequencies com.pi[] and com.piG[][] according
   to com.codonf (Fequal, Fcodon, F3x4(MG), F1x4(MG), FMutSel(0)).
   See the original block comment below for details of the space layout. */
int InitializeCodon (FILE *fout, double space[])
{
/* Count codons for genes, calculate site patterns and fpatt.
Sequences com.z[] are not coded and may contain ambiguity characters
Space requirement for fcodonsg & fb3x4sg: max(ngene+1,ns)*(64+12+4).
First we count codons for output, with ambiguity characters ignored.
Then we recount to resolve ambiguity characters, to be used for ML
calculation later on.
set up com.pi[NCODE], com.piG[NGENE][64], according to com.codonf
com.pi[] has freqs for all codon sites in the seqs if ngene>1.
Space use is not economical as com.piG and fcodonsg are separate and
duplicated.
*/
/* NOTE(review): ic[] and wrongorder[] are used only inside the commented-out
   debug block near the end of this function. */
int j,k, nc=NCODE, ig, ic[3], wrongorder[4]={2,1,3,0};
int irf,nrf=20;
double *fcodonsg=space, *fb3x4sg=space+max2((com.ngene+1),com.ns)*nc;
double *fb4g=space+(com.ngene+1)*(64+12);
double *ppi, fcodon0[64],fb3x40[12],fb40[4], d1,d2,d3;
/* counts codons for output, species first, genes next */
if(noisy) puts("Counting codons..");
CountCodons(fout, fcodonsg, fb3x4sg, fb4g);
/* Now to count fcodonsg, fb3x4sg, fb4g, to set up pi's for ML calculation.
Three iterations are going on at the same time.
*/
if (com.codonf!=Fequal && !com.cleandata) { /* iteration to resolve ambiguities */
/* per-gene EM-style iteration: apportion ambiguous codons using the
   previous round's frequencies until all three tables converge */
for(ig=0; ig<com.ngene; ig++) { /* calculate com.piG[] */
axtoy(1/sum(fcodonsg+ig*nc,nc), fcodonsg+ig*nc, fcodon0, nc);
xtoy(fb3x4sg+ig*12, fb3x40, 12);
xtoy(fb4g+ig*4, fb40, 4);
for(irf=0; irf<nrf; irf++) {
zero(fcodonsg + ig*nc, nc);
zero(fb3x4sg + ig*12, 12);
zero(fb4g+ig*4, 4);
for(j=0; j<com.ns; j++) {
AddCodonFreqSeqGene (j, ig, fcodon0, fcodonsg+ig*nc,
fb3x40, fb3x4sg+ig*12, fb40, fb4g+ig*4);
}
abyx(1/sum(fcodonsg+ig*nc,nc), fcodonsg + ig*nc, nc);
for(k=0; k<3; k++)
abyx(1/sum(fb3x4sg+ig*12+k*4,4), fb3x4sg+ig*12+k*4, 4);
abyx(1/sum(fb4g+ig*4,4), fb4g+ig*4, 4);
/* stop when all three tables have stabilized */
d1 = distance(fcodonsg+ig*nc, fcodon0, nc);
d2 = distance(fb3x4sg+ig*12, fb3x40, 12);
d3 = distance(fb4g+ig*4, fb40, 4);
if(d1<1e-8 && d2<1e-8 && d3<1e-8)
break;
xtoy(fcodonsg+ig*nc, fcodon0, nc);
xtoy(fb3x4sg+ig*12, fb3x40, 12);
xtoy(fb4g+ig*4, fb40, 4);
} /* for(irf) */
} /* for(ig) */
/* same iteration once more for the all-gene totals (row com.ngene) */
axtoy(1/sum(fcodonsg+com.ngene*nc,nc), fcodonsg+com.ngene*nc, fcodon0, nc);
xtoy(fb3x4sg+com.ngene*12, fb3x40, 12);
xtoy(fb4g+com.ngene*4, fb40, 4);
for(irf=0; irf<nrf; irf++) { /* calculate com.pi[] */
zero(fcodonsg + com.ngene*nc, nc);
zero(fb3x4sg + com.ngene*12, 12);
zero(fb4g + com.ngene*4, 4);
for(ig=0; ig<com.ngene; ig++)
for(j=0; j<com.ns; j++) {
AddCodonFreqSeqGene(j, ig, fcodon0, fcodonsg+com.ngene*nc,
fb3x40, fb3x4sg+com.ngene*12, fb40, fb4g+com.ngene*4);
}
abyx(1/sum(fcodonsg+com.ngene*nc,nc), fcodonsg+com.ngene*nc, nc);
for(k=0;k<3;k++)
abyx(1/sum(fb3x4sg+com.ngene*12+k*4,4), fb3x4sg+com.ngene*12+k*4, 4);
abyx(1/sum(fb4g+com.ngene*4,4), fb4g+com.ngene*4, 4);
d1 = distance(fcodonsg+com.ngene*nc, fcodon0, nc);
d2 = distance(fb3x4sg+com.ngene*12, fb3x40, 12);
d3 = distance(fb4g+com.ngene*4, fb40, 4);
if(d1<1e-8 && d2<1e-8 && d3<1e-8) break;
xtoy(fcodonsg+com.ngene*nc, fcodon0, nc);
xtoy(fb3x4sg+com.ngene*12, fb3x40, 12);
xtoy(fb4g+com.ngene*4, fb40, 4);
} /* for(irf) */
}
/* edit com.pi & com.piG according to com.codonf */
for(ig=0; ig<com.ngene+1; ig++) {
/* rows 0..ngene-1 fill com.piG[ig]; the totals row fills com.pi */
ppi = (ig<com.ngene?com.piG[ig]:com.pi);
zero(ppi, nc);
if (com.codonf==Fequal)
fillxc(ppi,1,com.ncode);
else if (com.codonf==Fcodon || com.codonf==FMutSel0 || com.codonf==FMutSel) {
for(k=0; k<nc; k++)
if(FROM64[k]>-1) ppi[FROM64[k]] = fcodonsg[ig*nc+k];
}
else if (com.codonf==F3x4 || com.codonf==F3x4MG) {
for(k=0; k<nc; k++)
if(FROM64[k]>-1)
ppi[FROM64[k]] = fb3x4sg[ig*12+k/16]*fb3x4sg[ig*12+4+(k/4)%4]*fb3x4sg[ig*12+8+k%4];
}
else if (com.codonf==F1x4 || com.codonf==F1x4MG) {
for(k=0; k<nc; k++)
if(FROM64[k]>-1)
ppi[FROM64[k]] = fb4g[ig*4+k/16]*fb4g[ig*4+(k/4)%4]*fb4g[ig*4+k%4];
}
abyx(1/sum(ppi,com.ncode), ppi, com.ncode); /* ncode != nc */
if(ig<com.ngene) {
if (com.codonf>=F1x4 && com.codonf<=FMutSel)
xtoy(fb3x4sg+ig*12, com.f3x4[ig], 12);
/* write 1x4 tables into 3x4 tables */
if (com.codonf==FMutSel0 || com.codonf==FMutSel || com.codonf==F1x4 || com.codonf==F1x4MG) {
for(k=0; k<4; k++) {
d1 = com.f3x4[ig][0*4+k] + com.f3x4[ig][1*4+k] + com.f3x4[ig][2*4+k];
for(j=0; j<3; j++)
com.f3x4[ig][j*4+k] = d1/3;
}
}
}
}
if(com.codonf==FMutSel0) {
/* derive amino acid frequencies from the codon frequencies */
for(j=0,zero(com.piAA,20); j<com.ncode; j++)
com.piAA[GeneticCode[com.icode][FROM61[j]]] += com.pi[j];
matout(F0, com.piAA, 1, 20);
}
if(com.codonf>=F1x4 && com.codonf<=FMutSel)
com.pf3x4 = com.f3x4[0];
if(com.verbose && com.ngene==1) {
fprintf(fout,"\n\nCodon frequencies under model, for use in evolver (TTT TTC TTA TTG ... GGG):\n");
for(k=0; k<64; k++) {
fprintf(fout,"%12.8f",GeneticCode[com.icode][k]==-1?0:com.pi[FROM64[k]]);
if((k+1)%4==0) FPN(fout);
}
/*
fprintf(fout, "\nWrong order: AAA AAC AAG AAT ... TTT\n");
for(k=0; k<64; k++) {
ic[0] = wrongorder[k/16];
ic[1] = wrongorder[(k/4)%4];
ic[2] = wrongorder[k%4];
j = ic[0]*16+ic[1]*4+ic[2];
if(GeneticCode[com.icode][j]!=-1)
fprintf(fout,"%.8f, ", com.pi[FROM64[j]]);
}
exit(0);
*/
}
return(0);
}
int AA2Codonf(double faa[20], double fcodon[])
{
/* Derives codon frequencies fcodon[] from amino acid frequencies faa[],
   giving every synonymous codon of amino acid aa the equal share
   faa[aa]/NCsyn[aa].  Used in codon-based amino acid substitution models.
   Warns if the resulting frequencies do not sum to 1.
*/
   int NCsyn[20];          /* number of synonymous codons per amino acid */
   int ic, aa;

   for(aa=0; aa<20; aa++)
      NCsyn[aa] = 0;
   for(ic=0; ic<64; ic++) {
      aa = GeneticCode[com.icode][ic];
      if(aa != -1)
         NCsyn[aa]++;
   }
   zero(fcodon, 64);
   for(ic=0; ic<Nsensecodon; ic++) {
      aa = GeneticCode[com.icode][FROM61[ic]];
      fcodon[ic] += faa[aa]/NCsyn[aa];
   }
   if(fabs(1-sum(fcodon,64))>1e-6) printf("\n1 == %12.7f\n", sum(fcodon,64));
   return (0);
}
int DistanceMatAA (FILE *fout)
{
int i,j, h;
double p, lst;
if(fout) fprintf(fout,"\nAA distances (raw proportions of different sites)\n");
for(h=0,lst=0; h<com.npatt; h++) lst+=com.fpatt[h];
FOR(i, com.ns) {
if(fout) fprintf(fout, "\n%-15s", com.spname[i]);
FOR(j,i) {
for(h=0,p=0; h<com.npatt; h++)
if (com.z[i][h] != com.z[j][h]) p += com.fpatt[h];
p /= lst;
SeqDistance[i*(i-1)/2+j]=p;
if(fout) fprintf(fout, " %7.4f", p);
}
}
if(fout) FPN(fout);
return (0);
}
/* GetDaa: reads the lower-triangular amino acid distance (or empirical
   substitution rate) matrix from com.daafile (grantham, dayhoff, jones,
   etc.) into daa[], symmetrizing it.  For codon models the matrix is
   rescaled by its maximum element; for amino acid models it is rescaled by
   the reference element ijAAref, and for model==Empirical the equilibrium
   frequencies com.pi[] are also read from the file (and checked to sum
   to 1).  Optionally prints the matrix to fout. */
int GetDaa (FILE* fout, double daa[])
{
/* Get the amino acid distance (or substitution rate) matrix
(grantham, dayhoff, jones, etc).
*/
FILE * fdaa;
char aa3[4]="";
int i,j, naa=20;
double dmax=0, dmin=1e40;
if(noisy>3) printf("\n\nReading matrix from %s", com.daafile);
if (com.model==REVaa_0||com.model==REVaa) puts(", to get initial values.");
fdaa = gfopen(com.daafile, "r");
/* read the lower triangle, mirror it, and track the extreme values */
for (i=0; i<naa; i++)
for (j=0,daa[i*naa+i]=0; j<i; j++) {
fscanf(fdaa, "%lf", &daa[i*naa+j]);
daa[j*naa+i] = daa[i*naa+j];
if (dmax<daa[i*naa+j]) dmax = daa[i*naa+j];
if (dmin>daa[i*naa+j]) dmin = daa[i*naa+j];
}
if(com.aaDist && (com.seqtype==1||com.model==FromCodon)) { /* codon model */
if(noisy) printf("\ndistance: %.2f --- %.2f\n", dmin, dmax);
/* NOTE(review): this branch scales com.daa rather than the daa argument —
   presumably the caller passes com.daa here; confirm against callers. */
for(i=0; i<naa; i++)
for(j=0; j<naa; j++)
com.daa[i*naa+j] /= dmax;
}
else if (com.seqtype==AAseq) {
/* rescale so the reference pair ijAAref has rate 1 */
for(i=0; i<naa; i++)
for(j=0; j<i; j++)
if(i*naa+j!=ijAAref)
daa[j*naa+i] = daa[i*naa+j] /= com.daa[ijAAref];
daa[ijAAref] = daa[(ijAAref%naa)*naa+(ijAAref/naa)] = 1;
if(com.model==Empirical) {
/* empirical models carry their own equilibrium frequencies in the file */
for(i=0; i<naa; i++)
if(fscanf(fdaa,"%lf",&com.pi[i])!=1)
error2("aaRatefile");
if (fabs(1-sum(com.pi,20))>1e-5) {
printf("\nSum of freq. = %.6f != 1 in aaRateFile\n", sum(com.pi,naa));
exit(-1);
}
}
}
fclose(fdaa);
if(fout) {
fprintf (fout, "\n%s\n", com.daafile);
for(i=0; i<naa; i++) {
fprintf (fout, "\n%4s", getAAstr(aa3,i));
for(j=0; j<i; j++)
fprintf (fout, "%5.0f", daa[i*naa+j]);
}
FPN (fout);
}
/*
SetAA1STEP();
for(i=0,FPN(frst);i<naa;i++,FPN(frst))
FOR(j,i) fprintf(frst,"%3d",AA1STEP[i*(i-1)/2+j]);
for(i=0,k=0;i<naa;i++)
FOR(j,i) if(AA1STEP[i*(i-1)/2+j]) {
fprintf(frst,"%c%c\t%.2f\n",AAs[i],AAs[j],com.daa[i*naa+j]);
k++;
}
fprintf(frst,"\n%d one-step amino acid pairs\n", k);
exit (0);
*/
return (0);
}
/* SetAA1STEP: fills the global lower-triangular flag array AA1STEP[] with 1
   for each amino acid pair reachable by a single nucleotide change in some
   codon pair, 0 otherwise, and sets com.nrate for models like AAClasses
   and REVaa_0.  PMat is reused as integer scratch space here. */
int SetAA1STEP (void)
{
/* Sets the global variable AA1STEP[19*20/2].
Sets com.nrate for models like AAClasses and REVaa_0.
AA1STEP[k] marks the k_th pair of amino acids that differ at one position,
Q[i*naa+j] is the k_th nonzero element if AA1STEP[k]=i*naa+j;
Lower diagonal of Q is visited, with i>j.
*/
int ncode0=com.ncode, nc, naa=20, i,j,k, ic1,ic2, ndiff, from[3],to[3];
int *Q=(int*)PMat;     /* reuse PMat as an integer naa x naa counter */
setmark_61_64();
/* restore com.ncode, which setmark_61_64() may have changed */
nc=Nsensecodon; com.ncode=ncode0;
for(i=0; i<naa*naa; i++) Q[i]=0;
/* count, for each amino acid pair, the codon pairs one nucleotide apart */
for (i=0; i<nc; i++)
for(j=0; j<i; j++) {
ic1=FROM61[i]; from[0]=ic1/16; from[1]=(ic1/4)%4; from[2]=ic1%4;
ic2=FROM61[j]; to[0]=ic2/16; to[1]=(ic2/4)%4; to[2]=ic2%4;
for (k=0,ndiff=0; k<3; k++) if (from[k]!=to[k]) ndiff++;
if (ndiff!=1) continue;
ic1 = GeneticCode[com.icode][ic1];
ic2 = GeneticCode[com.icode][ic2];
Q[ic1*naa+ic2]++;
Q[ic2*naa+ic1]++;
}
/*
#if DEBUG
for (i=0,FPN(F0); i<naa; i++,FPN(F0)) FOR(j,i)printf("%3d",Q[i*naa+j]);
#endif
*/
/* collapse counts into the 0/1 flags; k = number of one-step pairs */
for (i=0,k=0; i<naa; i++)
for(j=0; j<i; j++) {
if (Q[i*naa+j]>0) { AA1STEP[i*(i-1)/2+j] = 1; k++; }
else AA1STEP[i*(i-1)/2+j] = 0;
}
/*
for(i=0,FPN(F0);i<naa;i++,FPN(F0)) FOR(j,i)printf("%3d",AA1STEP[i*(i-1)/2+j]);
*/
if(com.seqtype==2) com.nrate = k-1; /* one element (ijAAref) is fixed */
return(0);
}
/* GetOmegaAA: initializes the lower-triangular class matrix OmegaAA[] for
   the AAClasses codon model, either from the file OmegaAA.dat or (when the
   file is absent/invalid) by assigning every one-step pair its own class.
   Also sets com.nrate, com.nkappa and com.nOmega.  See the original block
   comment below for the encoding of OmegaAA[]. */
int GetOmegaAA (int OmegaAA[])
{
/* This routine reads the file OmegaAA.dat to initialize the
lower diagonal matrix OmegaAA, which specifies the aa substituion
rate classes. To be used with the codon substitution model
AAClasses, which specifies several classes of the dN/dS ratio.
OmegaAA[iaa*(iaa-1)/2+jaa]= -1 if no one-step change is possible;
= 0 for the first, background, class
= i (1,..,nclass) if iaa and jaa are in class i
*/
char *OmegaAAf="OmegaAA.dat", line[1024];
FILE *fin=NULL;
int iomega, n1step=0, i,j,k, iaa,jaa, npair, naa=20, nline=1024;
/* default: every one-step pair in the background class 0, others -1 */
for(i=0,n1step=0; i<naa; i++) for(j=0; j<i; j++)
if (AA1STEP[i*(i-1)/2+j]) { OmegaAA[i*(i-1)/2+j] = 0; n1step++; }
else OmegaAA[i*(i-1)/2+j] = -1;
if (noisy) {
printf("\n\n%d one-step aa pairs.\n", n1step);
printf("Reading omega class from %s.\n", OmegaAAf);
}
com.nOmegaType = -1;
fin=fopen(OmegaAAf,"r");
if(fin) fscanf(fin, "%d", &com.nOmegaType);
if (com.nOmegaType<1 || com.nOmegaType>65-1) {
/* no usable class file: one class per estimable one-step pair */
if (com.seqtype!=CODONseq) puts("\nTo be tested.\a");
com.nOmegaType=0;
if (com.seqtype==AAseq) {
for(i=0; i<naa; i++) for(j=0; j<i; j++) if(i*naa+j != ijAAref && AA1STEP[i*(i-1)/2+j])
OmegaAA[i*(i-1)/2+j] = com.nOmegaType++;
}
else
for(i=0; i<naa; i++) for(j=0; j<i; j++)
if(AA1STEP[i*(i-1)/2+j]) OmegaAA[i*(i-1)/2+j] = com.nOmegaType++;
printf("%d dN/dS ratios estimated from data.\n",com.nOmegaType);
}
else {
printf("%d dN/dS ratios estimated from data.\n",com.nOmegaType);
/* parse each class line: "<classnumber>: <aa-pair letters...>" */
for(iomega=0; iomega<com.nOmegaType-1; iomega++) {
fscanf(fin, "%d", &j);
if (j!=iomega+1) { printf("err data file %s.", OmegaAAf); exit(-1); }
printf ("\nClass #%d: ", j);
j = fgetc (fin); if (j!=':') error2("err expecting :");
fgets (line, nline, fin);
printf ("%s\n", line);
for (j=0,npair=0; j<nline-1 && line[j] && line[j]!='\n'; j++) {
iaa = line[j];
if (!isalpha(iaa)) continue;
jaa = line[++j]; if(!isalpha(jaa)) error2("err jaa");
npair++;
printf ("\npair %2d: |%c%c| ", npair, iaa,jaa);
iaa=CodeChara((char)iaa,AAseq); jaa=CodeChara((char)jaa,AAseq);
if(iaa<0||iaa>19||jaa<0||jaa>19) error2("aa not found");
if (iaa<jaa) { k=jaa, jaa=iaa; iaa=k; }   /* enforce iaa > jaa */
printf ("|%c%c (%2d,%2d)| ", AAs[iaa], AAs[jaa],iaa,jaa);
if (iaa==jaa) puts("This pair has no effect.");
if (OmegaAA[iaa*(iaa-1)/2+jaa]==-1) {
puts("\nThis pair cannot change in one step and is ignored!");
continue;
}
else if (OmegaAA[iaa*(iaa-1)/2+jaa])
error2("This pair has already been specified?");
OmegaAA[iaa*(iaa-1)/2+jaa]=iomega+1;
printf (" in class %d ",iomega+1);
}
}
}
if(fin) fclose(fin);
/* total rate parameters = kappa parameter(s) + omega classes */
com.nrate = com.nkappa = (com.hkyREV ? 5 : !com.fix_kappa);
com.nrate += (com.nOmega = com.nOmegaType);
/*
for (i=0; i<naa; i++,FPN(F0))
for(j=0; j<i; j++) printf ("%3d", OmegaAA[i*(i-1)/2+j]);
*/
return (0);
}
int GetCodonFreqs2 (void)
{
/* Recalculates the expected codon frequencies com.pi[] under the frequency
   model com.codonf (Fequal, F3x4(MG), F1x4(MG); Fcodon/FMutSel keep the
   observed values), using the observed codon frequencies already in
   com.pi[] as input.  For MG-style models the fitted base tables are also
   copied into com.pf3x4.  Used by PairwiseCodon().
*/
   int n=com.ncode, i, j, ic, b[3];
   double *pi=com.pi, fb3x4[12], fb4[4], GC[3]={0};

   if (com.codonf==Fequal) {
      fillxc(pi,1./n,n);
      return 0;
   }
   if (com.codonf!=Fcodon && com.codonf!=FMutSel) {
      /* accumulate position-specific (fb3x4) and overall (fb4) base freqs */
      zero(fb3x4,12);
      zero(fb4,4);
      for (i=0; i<n; i++) {
         ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
         for(j=0; j<3; j++) {
            fb3x4[j*4+b[j]] += pi[i];
            fb4[b[j]] += pi[i]/3.;
         }
      }
      /* expected codon frequency = product of its base frequencies */
      for (i=0; i<n; i++) {
         ic=FROM61[i]; b[0]=ic/16; b[1]=(ic/4)%4; b[2]=ic%4;
         if (com.codonf==F3x4 || com.codonf==F3x4MG)
            pi[i] = fb3x4[b[0]]*fb3x4[4+b[1]]*fb3x4[8+b[2]];
         else
            pi[i] = fb4[b[0]]*fb4[b[1]]*fb4[b[2]];
      }
      if(com.codonf==F1x4MG)
         for(j=0; j<3; j++)
            xtoy(fb4, com.pf3x4+j*4, 4);
      else if(com.codonf==F3x4MG)
         xtoy(fb3x4, com.pf3x4, 12);
      abyx (1./sum(pi,n), pi, n);
      GC[0] = (fb3x4[0+1]+fb3x4[0+3])*100;
      GC[1] = (fb3x4[4+1]+fb3x4[4+3])*100;
      GC[2] = (fb3x4[8+1]+fb3x4[8+3])*100;
      /* fprintf(frst1, "\tGC123\t%.1f\t%.1f\t%.1f", GC[0],GC[1],GC[2]); */
   }
   return 0;
}
/* lfun2dSdN: negative log-likelihood for a pair of codon sequences
   (com.z[0] & com.z[1]) given the parameter vector x[] (x[0]=t, then kappa
   parameter(s), then omega unless fixed).  f(i,j) = pi_i * p_ij(t), with
   p(t) computed from the eigen-decomposition Root/U/V.  Data must be clean
   and coded; transition probabilities are computed for observed patterns
   only.  Used as the objective by ming2() in PairwiseCodon(). */
double lfun2dSdN (double x[], int np)
{
/* likelihood function for calculating dS and dN between 2 sequences,
com.z[0] & com.z[1]:
f(i,j) = \pi_i * p_{ij}(t)
Data are clean and coded.
Transition probability pijt is calculated for observed patterns only.
*/
int n=com.ncode, h,i,k, ik, z0,z1;
double lnL=0, fh,expt[NCODE], mr=0;
NFunCall++;
/* unpack kappa/omega from x[] into com.pkappa/com.omega */
k=1, ik=0;
if(com.hkyREV==0) {
if(com.fix_kappa==1) { com.pkappa[0] = com.kappa; ik = 1; }
else com.kappa = x[k]; /* Is this necessary? */
}
for(i=0; i<(com.hkyREV ? 5 : !com.fix_kappa); i++)
com.pkappa[ik++] = x[k++];
if(com.codonf==FMutSel)
for(i=0; i<3; i++)
com.pkappa[ik++] = x[k++];
if(!com.fix_omega) com.omega = x[1+com.nkappa];
/* re-diagonalize Q only when rate parameters can change */
if(!com.fix_kappa || !com.fix_omega)
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa, com.omega,PMat);
for(k=0; k<n; k++)
expt[k] = exp(x[0]*Root[k]);
for (h=0; h<com.npatt; h++) {
if(com.fpatt[h]<1e-20) continue;
z0 = com.z[0][h];
z1 = com.z[1][h];
/* p_ij(t) = sum_k U[i][k] exp(t*lambda_k) V[k][j] */
for(k=0,fh=0;k<n;k++)
fh += U[z0*n+k]*expt[k]*V[k*n+z1];
fh *= com.pi[z0];
if(fh<=0) {
/* numerical underflow/roundoff guard: clamp and report */
matout(F0,x,1,np);
printf("lfun2dSdN: fh = %.9f\n",fh);
fh = 1e-70;
}
lnL -= log(fh) * com.fpatt[h];
}
return (lnL);
}
/* VariancedSdN: propagates the covariance matrix vtw of (t, omega) into the
   covariance matrix vdSdN of (dS, dN) by the delta method, estimating the
   Jacobian with central finite differences on EigenQcodon().  com.pkappa
   and com.pi are used; sampling error in other parameters is ignored.
   Returns -1 (with vdSdN zeroed) if vtw has a non-positive diagonal. */
int VariancedSdN (double t, double omega, double vtw[2*2], double vdSdN[2*2])
{
/* This calculates the covariance matrix of dS & dN, using the
difference approximation, from the covariance matrix of t and
omega (vtw). com.kappa and com.pi are used. Sampling errors
in parameters other than t and omega, such as kappa and pi[],
are ignored.
JacobiSN = {{dS/dt, dS/dw}, {dN/dt,dN/dw}}
*/
int np=2;
double JacobiSN[2*2],T1[2*3],T2[2*3], S,dS,dN, dS1,dN1,dS2,dN2, eh, mr=0;
if(vtw[0]<=0 || vtw[3]<=0) {
puts("var(dS,dN) not calculable.");
zero(vdSdN,4);
return(-1);
}
/* printf("\nt & w: %.5f %.5f\n", t, omega);
matout(F0,vtw, 2,2); */
EigenQcodon(2,t,&S,&dS,&dN,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
/* central differences in t for the first Jacobian column */
eh = (t+1)*Small_Diff;
EigenQcodon(2,t+eh,&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
EigenQcodon(2,t-eh,&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,omega,PMat);
JacobiSN[0*np+0] = (dS1 - dS2)/(2*eh);
JacobiSN[1*np+0] = (dN1 - dN2)/(2*eh);
/* central differences in omega for the second Jacobian column */
eh = (omega+1)*Small_Diff;
EigenQcodon(2,t,&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,omega+eh,PMat);
EigenQcodon(2,t,&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,omega-eh,PMat);
JacobiSN[0*np+1] = (dS1 - dS2)/(2*eh);
JacobiSN[1*np+1] = (dN1 - dN2)/(2*eh);
/* vdSdN = J * vtw * J' */
matby(JacobiSN,vtw,T1,2,2,2);
mattransp2 (JacobiSN, T2, 2, 2);
matby(T1,T2,vdSdN,2,2,2);
/* matout(F0,vdSdN, 2,2); */
return (0);
}
double distanceHKY85 (double x[], double *kappa, double alpha);
int distance3pos(double dHKY[], double kHKY[], int *sites4, char *z1, char *z2);
int distance3pos(double dHKY[], double kHKY[], int *sites4, char *z1, char *z2)
{
/* Nucleotide-based distances between two coded, clean protein-coding
   sequences z1 and z2 (com.cleandata==1 assumed).  dHKY[0..2] receive
   HKY85 distances for codon positions 1-3; dHKY[3] is computed from third
   positions of four-fold degenerate sites (their count is returned in
   *sites4).  kHKY[] receives the matching kappa estimates.
*/
   int h, k, i, j, c1, c2, b1[3], b2[3];
   double fij[4][16]={{0}}, pi4[4]={0};
   /* fij[0..2]: per-position base-pair frequencies; fij[3]: 4-fold sites */

   for (h=0; h<com.npatt; h++) {
      c1 = FROM61[(int)z1[h]];  b1[0]=c1/16; b1[1]=(c1/4)%4; b1[2]=c1%4;
      c2 = FROM61[(int)z2[h]];  b2[0]=c2/16; b2[1]=(c2/4)%4; b2[2]=c2%4;
      for(k=0; k<3; k++)
         fij[k][b1[k]*4 + b2[k]] += com.fpatt[h]/com.ls;
      /* 4-fold site: first two positions identical and 4-fold degenerate */
      if(b1[0]==b2[0] && b1[1]==b2[1] && FourFold[b2[0]][b2[1]])
         fij[3][b1[2]*4 + b2[2]] += com.fpatt[h];
   }
   *sites4 = (int) sum(fij[3], 16);
   if(*sites4)
      for(k=0; k<16; k++)
         fij[3][k] /= *sites4;
   /* pi4[] (marginal base freqs at 4-fold sites) is computed but unused,
      matching the original code */
   for(i=0; i<4; i++)
      for(j=0; j<4; j++) {
         pi4[i] += fij[3][i*4+j]/2;
         pi4[j] += fij[3][i*4+j]/2;
      }
   for(k=0; k<4; k++)
      dHKY[k] = distanceHKY85(fij[k], &kHKY[k], 0);
   return(0);
}
/* PairwiseCodon: maximum-likelihood estimation of t, kappa, omega and the
   derived dS & dN for every pair of codon sequences (Goldman & Yang 1994).
   Each pair gets its own collapsed site-pattern list (com.z/com.npatt/
   com.fpatt are temporarily repointed and restored at the end).  Results
   are written to fout/frst/frst1 and the distance matrices fds, fdn, ft.
   With com.getSE, standard errors of dS & dN are obtained by the delta
   method in two ways (full Jacobian over all parameters, and via
   VariancedSdN over t & omega only). */
int PairwiseCodon (FILE *fout, FILE*fds, FILE*fdn, FILE*ft, double space[])
{
/* Calculates ds & dn for all pairwise codon sequence comparisons.
It uses different npatt for different pairs.
The data com.z[] should be encoded clean data, with ambiguity characters
removed. Think of what to do with raw unclean data.
JacobiSN has two columns, the 1st are deratives of dS (dS/dt, dS/dk, dS/dw)
and the second of dN.
*/
char *pz0[NS],codon[2][3]; /* pz0, npatt0, & fpatt0 hold the old information */
int npatt0=com.npatt;
double *fpatt0, ls0=com.ls;
float fp[NCODE*NCODE];
int n=com.ncode, is,js,j,k,h, i0,np, wname=15;
int nb[3],ib[3][4],ic[2], missing=0, sites4;
double x[10]={.9,1,.5,.5,.5,.5,.3}, xb[10][2]={{1e-5,50}}, large=50;
double kappab[2]={.01,999}, omegab[2]={.001,99};
double lnL, e=1e-7, *var=space+NP, S,dS,dN, mr=0;
double JacobiSN[2*3],T1[2*3],T2[2*3],vSN[2*2], dS1,dN1,dS2,dN2,y[3],eh;
/* for calculating SEs of dS & dN */
double dHKY[4], kHKY[4];
/* one malloc holds the saved fpatt plus the two per-pair pattern arrays */
fpatt0=(double*)malloc(npatt0*3*sizeof(double));
FOR(k,com.ns) pz0[k]=com.z[k];
com.z[0] = (char*)(fpatt0+npatt0);
com.z[1] = com.z[0]+npatt0;
/* NOTE(review): cast to float here narrows the saved pattern counts even
   though fpatt0 is a double array — confirm intended. */
FOR (k,npatt0) fpatt0[k] = (float)com.fpatt[k];
if(!com.cleandata) puts("\nPairwiseCodon: pairwise deletion.");
if (com.ngene>1 && com.Mgene==1) puts("ngene>1 to be tested.");
if (noisy>1) printf("\npairwise comparison (Goldman & Yang 1994).\n");
fprintf(fout,"\npairwise comparison, codon frequencies: %s.\n",
codonfreqs[com.codonf]);
FOR(j,com.nkappa) { xb[1+j][0]=kappab[0]; xb[1+j][1]=kappab[1]; }
if(!com.fix_omega) { k=1+com.nkappa; xb[k][0]=omegab[0]; xb[k][1]=omegab[1]; }
fprintf(fds,"%6d\n", com.ns); fprintf(fdn,"%6d\n", com.ns);
fprintf(ft,"%6d\n", com.ns);
fprintf(frst, "\n\npairwise comparison (Goldman & Yang 1994)");
fprintf(frst,
"\nseq seq N S dN dS dN/dS Paras.\n");
for(is=0;is<com.ns;is++) {
fprintf(fds,"%-*s ", wname,com.spname[is]);
fprintf(fdn,"%-*s ", wname,com.spname[is]);
fprintf(ft,"%-*s ", wname,com.spname[is]);
for(js=0; js<is; js++) {
if(noisy>9) {
/* interactive mode: user picks the pair */
puts("\nInput the pair i & j (i>j) for dN-dS calculation? ");
scanf("%d%d",&is,&js);
is--; js--;
if(is>com.ns || js<0 || is<js) error2("invalid pair");
}
if(noisy>1) printf ("\n%4d vs. %3d", is+1, js+1);
fprintf(fout,"\n\n%d (%s) ... %d (%s)",
is+1,com.spname[is], js+1,com.spname[js]);
fprintf (frst, "%3d %3d ", is+1, js+1);
if(noisy>2) fprintf(frub, "\n\n%d (%s) ... %d (%s)",
is+1,com.spname[is], js+1,com.spname[js]);
/* fp[j*n+k] accumulates pattern counts for the (unordered) codon pair */
for(k=0; k<n*n; k++) fp[k]=0;
if(com.cleandata) {
for(h=0; h<npatt0; h++) {
j = max2(pz0[is][h],pz0[js][h]);
k = min2(pz0[is][h],pz0[js][h]);
fp[j*n+k] += (float)fpatt0[h];
}
}
else {
/* pairwise deletion: skip sites ambiguous in either sequence */
for(h=0,com.ls=0; h<npatt0; h++) {
FOR(i0,2) FOR(k,3) codon[i0][k] = pz0[i0==0 ? is : js][h*3+k];
for(i0=0,missing=0; i0<2; i0++) {
for(k=0; k<3; k++)
NucListall(codon[i0][k], &nb[k], ib[k]);
if(nb[0]*nb[1]*nb[2]!=1)
{ missing=1; break; }
else
ic[i0] = FROM64[ ib[0][0]*16+ib[1][0]*4+ib[2][0] ];
}
if(missing) continue;
com.ls += (int)fpatt0[h];
j = max2(ic[0],ic[1]);
k = min2(ic[0],ic[1]);
fp[j*n+k] += (float)fpatt0[h];
}
}
/* collapse the fp matrix into the pair's own pattern list */
for(j=0,com.npatt=0;j<n;j++) {
for(k=0; k<j+1; k++)
if(fp[j*n+k]) {
com.z[0][com.npatt] = (char)j;
com.z[1][com.npatt] = (char)k;
com.fpatt[com.npatt++] = fp[j*n+k];
}
}
if(noisy>2) printf("\n npatt=%d ",com.npatt);
/* observed codon frequencies for this pair, then model frequencies */
for(j=0,zero(com.pi,n); j<com.npatt; j++) {
com.pi[(int)com.z[0][j]] += com.fpatt[j]/(2.*com.ls);
com.pi[(int)com.z[1][j]] += com.fpatt[j]/(2.*com.ls);
}
GetCodonFreqs2 ();
distance3pos(dHKY, kHKY, &sites4, com.z[0], com.z[1]);
np = com.np = (com.ntime=1) + com.nkappa + !com.fix_omega;
NFunCall = 0;
/* initial values and bounds */
x[0] = SeqDistance[is*(is-1)/2+js]*(0.8+0.3*rndu());
if(x[0]>3) x[0]=1.5+rndu();
if(x[0]<1e-6) x[0]=.5*rndu();
if(com.nkappa==1) { /* HKY type model */
if(is==0 && js==1) x[1] = (com.icode==1?4:1.5)+rndu();
else x[1] = (x[1]*2+2+rndu())/3;
if(x[1]>10) x[1] = 5;
xb[1][0] = 0.4;
}
else /* REV or FMutSel models, do something later */
for(j=1,x[1]=.8+.4*rndu(); j<com.nkappa; j++)
x[1+j] = .2+.4*rndu();
if(!com.fix_omega) {
k = 1+com.nkappa;
if(is==0 && js==0) x[k] = 0.2+0.2*rndu();
else x[k] = (3*x[k]+0.6*rndu())/4;
x[k] = max2(x[k],0.01);
x[k] = min2(x[k],2);
}
if(noisy>=9) {
FPN(F0); FOR(k,np) printf(" %12.6f",x[k]); FPN(F0);
FOR(k,np) printf(" %12.6f",xb[k][0]); FPN(F0);
FOR(k,np) printf(" %12.6f",xb[k][1]); FPN(F0);
}
/* with all rates fixed, diagonalize once; otherwise ming2 optimizes */
if(com.fix_kappa && com.fix_omega)
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, com.pkappa,com.omega,PMat);
if(np)
ming2(noisy>3?frub:NULL,&lnL,lfun2dSdN,NULL,x,xb, space,e,np);
else { x[1]=x[2]=com.kappa=com.omega=0; lnL=0; }
lnLmodel = lnL;
fprintf(fout,"\nlnL =%12.6f\n",-lnL);
FOR(k,np) fprintf(fout," %8.5f",x[k]); FPN(fout);
if(noisy>2) {
printf("\n\nt_NG = %.5f\tMLEs: ", SeqDistance[is*(is-1)/2+js]);
for(k=0;k<np;k++) printf(" %.5f", x[k]);
}
if (np && com.getSE) {
/* SEs from the inverse Hessian at the MLE */
Hessian(np, x, lnL, space, var, lfun2dSdN, var+np*np);
matinv(var, np, np, var+np*np);
fprintf(fout,"SEs for parameters:\n");
FOR(k,np) fprintf(fout," %8.5f",(var[k*np+k]>0.?sqrt(var[k*np+k]):-0));
FPN(fout);
}
FPN(fout);
/* convert (t, kappa, omega) into S, dS, dN */
EigenQcodon(2,x[0],&S,&dS,&dN, NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
if(noisy>=3) {
puts("\nNucleotide-based analysis (approximate MLEs; use baseml to get proper MLEs):");
printf("\ndHKY (123-4):"); FOR (k,4) printf(" %8.5f", dHKY[k]);
printf("\nkHKY (123-4):"); FOR (k,4) printf(" %8.5f", kHKY[k]);
printf(" (%d four-fold sites)\n", sites4);
}
fprintf(fds," %7.4f",dS); fprintf(fdn," %7.4f",dN);
fprintf(ft," %7.4f",x[0]);
fprintf (fout,
"t= %6.4f S= %7.1f N= %7.1f dN/dS= %7.4f dN =%7.4f dS =%7.4f\n",
x[0], S, com.ls*3-S, com.omega, dN, dS);
fprintf(frst,"%8.1f %8.1f %8.4f %8.4f %8.4f", com.ls*3-S, S, dN, dS, com.omega);
for(k=0; k<np; k++) fprintf(frst," %8.4f",x[k]);
for(k=0; k<np; k++) fprintf(frst1,"\t%.4f",x[k]);
fprintf(frst1,"\t%.3f", -lnL);
fprintf(frst1,"\t%.4f\t%.4f", dN, dS);
k=np-1;
if (com.getSE)
fprintf(frst," +-%6.4f",(var[k*np+k]>0.?sqrt(var[k*np+k]):-1));
fprintf(frst," %9.3f\n",-lnL);
if(com.getSE && !com.fix_omega) {
/* method 1: full Jacobian of (dS,dN) w.r.t. all np parameters */
FOR(k, np) {
FOR(j,np) y[j] = x[j];
y[k] += (eh=(x[k]+1)*Small_Diff);
if(!com.fix_kappa) com.kappa = y[1];
com.omega = y[1+!com.fix_kappa];
EigenQcodon(2,y[0],&S,&dS1,&dN1,NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
y[k] -= 2*eh;
if(!com.fix_kappa) com.kappa = y[1];
com.omega = y[1+!com.fix_kappa];
EigenQcodon(2,y[0],&S,&dS2,&dN2,NULL,NULL,NULL, &mr, com.pkappa,com.omega,PMat);
JacobiSN[0*np+k] = (dS1-dS2)/(2*eh);
JacobiSN[1*np+k] = (dN1-dN2)/(2*eh);
}
matby(JacobiSN, var, T1, 2, np, np);
mattransp2(JacobiSN, T2, 2, np);
matby(T1,T2,vSN,2,np,2);
/*
fputs("\nvar(dS,dN):\n", fout);
matout(fout,vSN,2,2);
*/
fprintf(fout,"dN = %7.5f +- %.5f dS = %7.5f +- %.5f",
dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
fprintf(fout," (by method 1)\n");
/* method 2: delta method over (t, omega) only, via VariancedSdN */
T1[0] = var[0];
T1[1] = T1[2] = var[0*np+np-1];
T1[3] = var[(np-1)*np+(np-1)];
if(com.getSE && !com.fix_omega)
VariancedSdN(x[0], x[np-1], T1, vSN);
fprintf(fout,"dN = %7.5f +- %.5f dS = %7.5f +- %.5f",
dN,(vSN[3]>0?sqrt(vSN[3]):-0),dS,(vSN[0]>0?sqrt(vSN[0]):-0));
fprintf(fout," (by method 2)\n");
}
fflush(frst); fflush(fout);
} /* for (js) */
FPN(fds); FPN(fdn); FPN(ft);
fflush(fds); fflush(fdn); fflush(ft);
} /* for (is) */
/* restore the global sequence/pattern state saved on entry */
com.ls = (int)ls0; FOR(k,com.ns) com.z[k] = pz0[k];
com.npatt = npatt0; FOR(h,npatt0) com.fpatt[h] = fpatt0[h]; free(fpatt0);
return (0);
}
double lfun2AA (double t)
{
/* likelihood function for two amino acid sequences
prob(i,j) = PI_i * p(i,j,t)
The data are clean & coded (com.z[0] & com.z[1]).
Transition probability pijt is calculated for observed patterns only.
*/
int n=20, h,k, aa0,aa1;
double lnL=0, pijt,expt[20],al=com.alpha;
if(al==0) FOR(k,n) expt[k] = exp(t*Root[k]);
else FOR(k,n) expt[k] = pow(al/(al-t*Root[k]),al);
for(h=0; h<com.npatt; h++) {
aa0=com.z[0][h]; aa1=com.z[1][h];
for(k=0,pijt=0; k<n; k++)
pijt += U[aa0*n+k] * expt[k]*V[k*n+aa1];
lnL -= log(com.pi[aa0]*pijt)*com.fpatt[h];
}
return(lnL);
}
/* _nestS: number of free symmetrical rate elements to estimate (189 = all,
   0 = take them from com.daa[]); _Fij: observed pair-frequency table used
   by lfun2AArev() below. */
int _nestS=0; /* 189= estimate the S elements, 0= use those from com.daa[] */
static double *_Fij;
/* lfun2AArev: negative log-likelihood for the nonstationary pairwise amino
   acid model of PairwiseAArev().  x[] packs the (optional) symmetrical rate
   elements, three frequency vectors (root, branch 1, branch 2), and 1 or 2
   branch lengths.  Compares the expected pair frequencies Fe against the
   observed _Fij.  See notes in PairwiseAArev(). */
double lfun2AArev (double x[], int np)
{
/* np = _nestS + 19*3 + (1 or 2);
x[]: Q matrix, 3*pi, 1 or 2 blength
pi[0] is for the root, pi[1] & pi[2] are for Q[1] & Q[2] for the 2 branches.
See notes in PairwiseAArev().
*/
int i,j,k, n=20;
/* CAUTION: T and Fe both alias Q[0]; Q[0] is free only because the root
   has no rate matrix, and Fe is filled only after Q[1]/Q[2] are done. */
double pi[3][20], *p, Q[3][400], *T=Q[0], *Fe=Q[0], t,t1,t2, m1,m2, lnL=0;
double space[20*20*2+20*2];
NFunCall++;
/* unpack and renormalize the three frequency vectors (20th freq fixed at 1
   before normalization) */
for(k=0; k<3; k++) {
for(i=0,p=pi[k],p[19]=1; i<n-1; i++) p[i] = x[_nestS+k*19+i];
for(i=0,t=0; i<n; i++) t+=p[i];
for(i=0; i<n; i++) p[i]/=t;
}
if(_nestS) {
/* symmetrical part estimated from x[], with the reference element fixed at 1 */
for(i=0,k=0; i<n; i++) {
for(j=0,Q[1][i*n+i]=0; j<i; j++)
if(i*n+j != ijAAref)
Q[1][i*n+j]=Q[1][j*n+i] = x[k++];
}
Q[1][ijAAref] = Q[1][(ijAAref%n)*n+(ijAAref/n)] = 1;
}
else {
/* symmetrical part taken from the com.daa file */
for(i=0; i<n; i++)
for(j=0,Q[1][i*n+i]=0; j<=i; j++)
Q[1][i*n+j] = Q[1][j*n+i] = com.daa[i*n+j];
}
/* build the two branch rate matrices (same S, different pi), with proper
   diagonals, and their mean rates m1, m2 */
for(i=0,m1=m2=0; i<n; i++) {
for(j=0,t1=t2=0;j<n;j++) {
Q[2][i*n+j] = Q[1][i*n+j]*pi[2][j];
Q[1][i*n+j] *= pi[1][j];
t1 += Q[1][i*n+j];
t2 += Q[2][i*n+j];
}
Q[1][i*n+i] = -t1;
Q[2][i*n+i] = -t2;
m1 += pi[1][i]*t1;
m2 += pi[2][i]*t2;
}
/* scale branch lengths: one shared length split evenly, or two lengths */
if(com.ntime==1) { t1 = x[np-1]/2/m1; t2 = x[np-1]/2/m2; }
else { t1 = x[np-2]/m1; t2 = x[np-1]/m2; }
/* Q[1], Q[2] are overwritten with their transition matrices P(t) */
PMatQRev(Q[1], pi[1], t1, n, space);
PMatQRev(Q[2], pi[2], t2, n, space);
/* expected pair frequencies: Fe(i,j) = sum_k pi0[k] P1(k,i) P2(k,j) */
for(i=0; i<n*n; i++) Fe[i]=0;
for(k=0;k<n;k++)
for(i=0;i<n;i++)
for(j=0,t=pi[0][k]*Q[1][k*n+i]; j<n; j++)
Fe[i*n+j] += t*Q[2][k*n+j];
/* if(fabs((t=sum(Fe,n*n))-1)>1e-6) printf("Fe = %.9f != 1\n", t); */
for(i=0; i<n*n; i++) {
if(_Fij[i]<=1e-15) continue;
if(Fe[i]>1e-200)
lnL -= _Fij[i]*log(Fe[i]);
else
printf("Fij_exp = %.10f < 0\n", Fe[i]);
}
return(lnL);
}
double PairwiseAArev (int is, int js)
{
/* This calculates pairwise distance under a nonstationary model. It assumes
three sets of amino acid frequencies: pi[0] for the root, pi[1] and pi[2]
are for the Q matrices for branches 1 and 2.
It estimates the symmetrical part of the rate matrix if _nestS==189.
If _nestS==0, it uses the symmetrical part read from the com.daa file.
It can estimate 1 or 2 distances depending on com.ntime=1 or 2.
np = 189 + 19*3 + (1 or 2);
x[]: Q matrix, 3*pi, 1 or 2 blength
Returns the estimated distance x[_nestS + 19*3].
*/
int n=com.ncode, h,i,j,k, np=_nestS+ 19*3 + 1;
double Fij[400], x[248], xb[248][2], lnL, e=1e-9, t=0, p[20];
com.ntime=1; /* 1: t1=t2; 2: t1 and t2 */
if(com.ntime==2) np++;
/* expose the observed pair frequencies to lfun2AArev() */
_Fij=Fij;
if(com.cleandata!=1) error2("cleandata");
/* grow the optimizer workspace if needed */
if(com.sspace < spaceming2(np)) {
com.sspace = spaceming2(np);
printf ("\nspace adjusted to %9lu bytes\n",com.sspace);
if((com.space=(double*)realloc(com.space,com.sspace))==NULL)
error2("oom space");
}
/* observed pair frequencies for sequences is & js */
for(h=0,zero(Fij,n*n); h<com.npatt; h++) {
Fij[com.z[is][h]*n+com.z[js][h]] += com.fpatt[h]/com.ls;
}
/* starting values for the S elements, perturbed from com.daa[] */
if(_nestS) {
for (i=1,k=0; i<n; i++) FOR(j,i)
if(i*n+j!=ijAAref) x[k++] = (com.daa[i*n+j]+.001)*(0.8+0.4*rndu());
}
/* NOTE(review): this loop overwrites ALL np entries with random starting
   values, including the S entries just initialized above when _nestS!=0.
   Confirm whether it should start at i=_nestS instead. */
for(i=0;i<np;i++) {
x[i]=rndu(); xb[i][0]=1e-5; xb[i][1]=100;
}
lnL = lfun2AArev(x,np);
printf("\nlnL0 = %12.6f\n",-lnL);
ming2(noisy>2?frub:NULL,&lnL,lfun2AArev,NULL,x,xb, com.space, e, np);
/* print the three estimated (renormalized) frequency vectors */
for(k=0; k<3; k++) {
for(i=0,p[19]=1; i<n-1; i++) p[i]=x[_nestS+k*19+i];
for(i=0,t=0; i<n; i++) t+=p[i];
for(i=0; i<n; i++) p[i]/=t;
matout2(F0, p, 1, n, 7, 4);
}
return (x[_nestS + 19*3]);
}
int PairwiseAA (FILE *fout, FILE*f2AA)
{
/* Calculates pairwise distances using amino acid seqs.
Data (com.z[]) are clean and coded.
com.npatt for the whole data set is used which may be greater than
the number of patterns for each pair.
SE is not calculated.
Results go to fout and to the distance-matrix file f2AA.
*/
char *pz0[NS];
int n=com.ncode, j, is,js;
double x, xb[2]={0,19}, lnL, step;
if (com.ngene>1 && com.Mgene==1) error2("ngene>1 to be tested.");
if (noisy) printf("\npairwise ML distances of AA seqs.\n\n");
/*
if(com.model>Empirical_F) error2("PairwiseAA: model wrong");
*/
/* set up frequencies and eigen-decomposition once for models that do not
   depend on the pair (Poisson and Empirical) */
if(com.model==0) fillxc(com.pi,1./n, n);
if(com.model>=Empirical) GetDaa(NULL, com.daa);
if(com.model==0 || com.model==Empirical)
EigenQaa(NULL, Root, U, V, NULL);
/* save com.z[] pointers; com.z[0] & com.z[1] are reused below as the
   working pair and restored before returning */
FOR(j,com.ns) pz0[j]=com.z[j];
fprintf(fout,"\nML distances of aa seqs.\n");
if(com.alpha)
fprintf(fout,"\nContinuous gamma with alpha = %.3f is used (ncatG is ignored).\n\n",com.alpha);
fprintf(f2AA,"%6d\n", com.ns);
for(is=0; is<com.ns; is++,FPN(F0),FPN(fout),FPN(f2AA)) {
printf ("%4d vs", is+1);
fprintf(f2AA,"%-14s ", com.spname[is]);
fprintf(fout,"%-14s ", com.spname[is]);
for(js=0; js<is; js++) {
/* nonstationary reversible model has its own routine */
if(com.model==REVaa) {
x = PairwiseAArev(is, js);
fprintf(f2AA," %7.4f",x); fprintf(fout," %7.4f",x);
continue;
}
com.z[0]=pz0[is]; com.z[1]=pz0[js];
printf (" %2d", js+1);
/* +F models: recompute frequencies (and eigen system) from this pair */
if(com.model==1||com.model==Empirical_F) {
for (j=0,zero(com.pi,n); j<com.npatt; j++) {
com.pi[(int)com.z[0][j]]+=com.fpatt[j];
com.pi[(int)com.z[1][j]]+=com.fpatt[j];
}
abyx(1./sum(com.pi,n), com.pi, n);
EigenQaa(NULL,Root,U,V,NULL);
}
/* com.posG[1]=com.npatt; */
/* 1-D ML search for the distance, started from the rough estimate */
xb[0]=SeqDistance[is*(is-1)/2+js]; x=xb[0]*1.5; step=xb[0];
LineSearch(lfun2AA, &lnL, &x, xb, step, 1e-7);
fprintf(f2AA," %7.4f",x); fprintf(fout," %7.4f",x);
if (com.getSE) ;  /* SE not implemented; deliberate no-op */
} /* for (js) */
} /* for (is) */
FOR(j,com.ns) com.z[j]=pz0[j];
return (0);
}
char GetAASiteSpecies(int species, int sitepatt)
{
/* Returns the amino acid character encoded by the codon at site pattern
   sitepatt in the given species.
   Returns '*' if an ambiguous codon encodes more than one amino acid, or
   '-' if the codon is completely ambiguous (--- or ***).
   Works on coded codon data only (com.seqtype==1).
*/
/* naa initialized to 0: previously it was read uninitialized below when
   every resolution of an ambiguity character was a stop codon */
int n=com.ncode, c, naa=0, k;
char aa, newaa;
if(com.seqtype!=1)
error2("GetAASiteSpecies() right now works for codon seqs only. Check.");
c = com.z[species][sitepatt];
if(c<n) {
/* unambiguous sense codon: translate directly */
aa = AAs[ GeneticCode[com.icode][FROM61[c]] ];
}
else { /* naa is = 1 or >1, precise value being incorrect. */
/* ambiguity character: scan its possible resolutions */
for(k=0,aa=-1; k<nChara[c]; k++) {
newaa = GeneticCode[com.icode][FROM61[ CharaMap[c][k] ]];
if(newaa==-1) continue;  /* skip stop codons */
newaa = AAs[newaa];
if(aa==-1) {
naa = 1;
aa = newaa;
}
else
if(newaa != aa) naa++;
}
if(nChara[c]==n) aa = '-';   /* completely ambiguous */
else if(naa>1) aa = '*';     /* codes for more than one amino acid */
}
return (aa);
}
int PrintProbNSsites (FILE* frst, double prob[], double meanw[], double varw[], int ncat, int refsp)
{
/* This prints out posterior probabilities that each site is from a site class
under the NSsites models (model=0).
This is called by both the old empirical Bayes routine (NEB) and also the new
Bayes empirical Bayes (BEB) routine.
prob[]  : ncat x com.npatt posterior probabilities per site pattern.
meanw[] : posterior mean w per pattern; varw[] its variance (BEB only).
refsp   : index of the reference sequence used for the amino acid column.
*/
int h, hp, it, ir, lst=(com.readpattern?com.npatt:com.ls);
double psel=0, wpos=1, cutoff=0.5;
double mpostp[NCATG];
char *sig, aa;
/* codons/St/Nt/ns/na/ndiff serve only the commented-out two-sequence
   codon printout further below */
char codons[2][4];
double St, Nt, ns, na, ndiff;
if(com.model==0) {
fprintf(frst," & postmean_w");
if(!BayesEB && com.rK[ncat-1]>1) fprintf(frst," & P(w>1)");
}
fprintf(frst,"\n(amino acids refer to 1st sequence: %s)\n\n", com.spname[refsp]);
zero(mpostp, com.ncatG);
/* per-site table: probabilities for every class, best class, mean w */
for(h=0; h<lst; h++,FPN(frst)) {
hp = (!com.readpattern ? com.pose[h] : h);
aa = GetAASiteSpecies(refsp, hp);
fprintf(frst,"%4d %c ", h+1, aa);
for (ir=0,it=0,psel=0; ir<ncat; ir++) {
fprintf(frst," %5.5f", prob[ir*com.npatt+hp]);
if(prob[ir*com.npatt+hp] > prob[it*com.npatt+hp])
it = ir;   /* it tracks the most probable class for this site */
if(!BayesEB && com.model==0)
if(com.rK[ir] > 1) psel += prob[ir*com.npatt+hp];
mpostp[ir] += prob[ir*com.npatt+hp]/com.ls;
}
fprintf(frst, " (%2d)", it+1);
if(com.model==0) {
fprintf(frst, " %6.3f", meanw[hp]);
if(!BayesEB && psel) fprintf(frst, " %6.3f", psel);
if(BayesEB==1 && com.model==0)
fprintf(frst, " +- %6.3f", varw[hp]);
}
}
/*
if(!BayesEB) {
printf("\nmean posterior probabilities for site classes");
matout(F0, mpostp, 1, com.ncatG);
matout(F0, com.freqK, 1, com.ncatG);
}
*/
/* list of positively selected sites */
if(com.model==0) { /* NSsites models */
if(com.NSsites!=1 && com.NSsites!=7)
fprintf(frst,"\nPositively selected sites\n\n\tProb(w>1) mean w\n\n");
/* it=1 flags that at least one class with w>1 has non-trivial weight */
for(ir=0,it=0; ir<ncat; ir++)
if(BayesEB==1 || (com.freqK[ir]>.1/com.ls && com.rK[ir]>wpos)) it=1;
if(!com.aaDist && it) {
fprintf(fout,"\nPositively selected sites (*: P>95%%; **: P>99%%)\n");
fprintf(fout,"(amino acids refer to 1st sequence: %s)\n\n", com.spname[refsp]);
fprintf(fout," Pr(w>1) %25s\n\n", "post mean +- SE for w");
for(h=0; h<lst; h++) {
hp=(!com.readpattern ? com.pose[h] : h);
/* P(w>1): BEB uses the last class, NEB sums classes with rK>wpos */
if(BayesEB==1)
psel = prob[(ncat-1)*com.npatt+hp];
else
for (ir=0,psel=0; ir<ncat; ir++)
if(com.rK[ir]>wpos) psel+=prob[ir*com.npatt+hp];
if(psel>cutoff) {
sig = " ";
if(psel>.95) sig = "* ";
if(psel>.99) sig = "**";
aa = GetAASiteSpecies(refsp, hp);
fprintf(fout,"%6d %c %10.3f%-8s %.3f", h+1, aa, psel, sig, meanw[hp]);
fprintf(frst,"%6d %c %10.3f%-8s %.3f", h+1, aa, psel, sig, meanw[hp]);
if(BayesEB==1 && com.model==0) {
fprintf(fout, " +- %5.3f", varw[hp]);
fprintf(frst, " +- %5.3f", varw[hp]);
}
/*********** print out both codons if 2 sequences ******/
/*
if(com.ns==2) {
codons[0] = CODONs[com.z[0][hp]]);
codons[1] = CODONs[com.z[1][hp]]);
ndiff=difcodonNG(codons[0], codons[1], &St,&Nt,&ns,&na,0,com.icode);
fprintf(fout,"\t%3s %3s %2.0f diff (ps pn: %5.3f %5.3f)", codons[0], codons[1], ndiff, ns/St, na/Nt);
}
*/
FPN(fout); FPN(frst);
}
}
FPN(fout);
/* NOTE(review): !BayesEB==1 parses as (!BayesEB)==1, i.e. BayesEB==0;
   if BayesEB can be 2 here, confirm whether BayesEB!=1 was intended */
if(!BayesEB==1 && com.rK[ncat-2]>wpos)
fputs("\nNote: more than one w>1. Check rst for details\n",fout);
}
}
return(0);
}
int lfunNSsites_rate (FILE* frst, double x[], int np)
{
/* This calculates the dN/dS rates for sites under models with variable dN/dS
ratios among sites (Nielsen and Yang 1998). Modified from lfundG()
com.fhK[] holds the posterior probabilities.
Also prints NEB positive-site lists, optionally writes a RasMol coloring
script from SiteNumbering.txt, and hands off to the BEB routines
(lfunNSsites_M2M8 / lfunNSsites_AC) for the appropriate models.
*/
int h,hp, ir, it=0, refsp=0, k=com.ntime+com.nrgene+com.nkappa;
double lnL=0, fh;
double w2=x[com.np-1],psel=0, *meanw, maxmw, minmw, wpos=1.1, cutoff=0.5;
char *sig, aa;
FILE *fsites, *fras;
int continuous=0, R,G,B;
int lst=(com.readpattern?com.npatt:com.ls);
int ncolors=5; /* continuous = 0 uses the specified colors */
char sitelabel[96], *colors[5]={"darkblue", "lightblue", "purple", "pinkred", "red"};
char *colorvalues[5]={"[2,2,120]", "[133,57,240]", "[186,60,200]", "[200,60,160]", "[250,5,5]"};
if(com.nparK) error2("lfunNSsites_rate to be done for HMM.");
if((meanw=(double*)malloc(com.npatt*sizeof(double)))==NULL)
error2("oom lfunNSsites_rate"); /* meanw useful for NSsites only */
if(com.aaDist==0)
printParametersNSsites(frst,x);
else
fputs("\nCheck main result file for parameter estimates\n", frst);
/* fill com.fhK[] with per-class site likelihoods (priors applied below) */
fx_r(x, np);
/* undo node scaling: convert scaled log values back via exp, tracking lnL */
if(com.NnodeScale)
FOR(h,com.npatt) {
for(ir=1,fh=com.fhK[h]; ir<com.ncatG; ir++)
if(com.fhK[ir*com.npatt+h]>fh) fh=com.fhK[ir*com.npatt+h];
for(ir=0; ir<com.ncatG; ir++)
com.fhK[ir*com.npatt+h]=exp(com.fhK[ir*com.npatt+h]-fh);
lnL-=fh*com.fpatt[h];
}
/* convert prior class likelihoods into posteriors and posterior mean w */
for(h=0; h<com.npatt; h++) {
for (ir=0,fh=meanw[h]=0; ir<com.ncatG; ir++) {
fh += (com.fhK[ir*com.npatt+h]*=com.freqK[ir]); /* prior=>posterior */
meanw[h] += com.fhK[ir*com.npatt+h]*com.rK[ir];
}
for (ir=0,meanw[h]/=fh; ir<com.ncatG; ir++) com.fhK[ir*com.npatt+h]/=fh;
lnL -= com.fpatt[h]*log(fh);
}
fprintf(frst,"\nNaive Empirical Bayes (NEB) probabilities for %d classes",com.ncatG);
if(com.model==0 && com.NSsites && com.NSsites!=1 && com.NSsites!=7)
fprintf(fout,"\nNaive Empirical Bayes (NEB) analysis");
PrintProbNSsites(frst, com.fhK, meanw, NULL, com.ncatG, refsp);
if(com.model && com.model<=NSbranch2) { /* branch&site models */
if(com.rK[0]>wpos || com.rK[1]>wpos) { /* positive sites for all lineages */
fputs("\n\nPositive sites for all lineages Prob(w>1):\n",fout);
for(h=0; h<lst; h++) {
hp=(!com.readpattern ? com.pose[h] : h);
aa = GetAASiteSpecies(refsp, hp);
psel = 0;
if(com.rK[0]>wpos) psel = com.fhK[0*com.npatt+hp];
if(com.rK[1]>wpos) psel += com.fhK[1*com.npatt+hp];
if(psel>cutoff) {
sig = "";
if(psel>.95) sig = "*";
if(psel>.99) sig = "**";
fprintf(fout, "%6d %c %.3f%s\n", h+1, aa, psel, sig);
}
}
}
if(w2>wpos && (com.freqK[com.ncatG-1]>1e-6)) { /* for foreground branches */
fprintf(fout,"\nNaive Empirical Bayes (NEB) analysis (please use the BEB results.)");
fprintf(fout,"\nPositive sites for foreground lineages Prob(w>1):\n\n");
for(h=0; h<lst; h++) {
hp=(!com.readpattern ? com.pose[h] : h);
aa = GetAASiteSpecies(refsp, hp);
/* classes 2 & 3 are the foreground w2 classes in branch-site A */
psel = com.fhK[2*com.npatt+hp]+com.fhK[3*com.npatt+hp];
if(psel>cutoff) {
sig = "";
if(psel>.95) sig = "*";
if(psel>.99) sig = "**";
fprintf(fout, "%6d %c %.3f%s\n", h+1, aa, psel, sig);
}
}
}
}
fprintf (frst,"\n\nlnL = %12.6f\n", -lnL);
/* RasMol script for coloring structure */
if(com.verbose && com.model==0) {
fsites=(FILE*)fopen("SiteNumbering.txt", "r");
if(fsites) {
puts("\nCollecting RasMol commands for coloring structure into RasMol.txt");
printf("Choose color scheme (0: %d colors, 1: white->red, 2: rainbow) ",ncolors);
scanf("%d", &continuous);
fras = (FILE*)gfopen("RasMol.txt", "w");
/* range of posterior mean w over patterns, for color scaling */
for(h=0,maxmw=0,minmw=99; h<com.npatt; h++) {
if(maxmw < meanw[h]) maxmw = meanw[h];
if(minmw > meanw[h]) minmw = meanw[h];
}
if(continuous == 0)
for (it=0; it<ncolors; it++)
printf("\t%-10s %-20s mean_w < %7.5f\n",
colors[it], colorvalues[it], (it+1)*(maxmw-minmw)/ncolors);
fprintf(fras, "cartoon\nset background white\n");
for(h=0; h<lst; h++) {
fscanf(fsites, "%d%s", &it, sitelabel);
if(it-1!=h) { puts("Site number wrong. Giving up."); break; }
if(strchr(sitelabel, '?')) continue;
hp = (!com.readpattern ? com.pose[h] : h);
if(continuous==0) {
/* discrete scheme: pick the first bin whose upper bound covers meanw */
for (it=0; it<ncolors; it++)
if(meanw[hp]<minmw+(it+1.)*(maxmw-minmw)/ncolors+1e-9) break;
fprintf(fras,"select %s\n\t\tcolor %s\n", sitelabel, colorvalues[it]);
}
else if (continuous==1) {
it = 5+(int)(245*(meanw[hp]-minmw)/(maxmw-minmw+1e-9));
fprintf(fras,"select %s\n\t\tcolor [250, %d, %d]\n", sitelabel, 255-it,255-it);
}
else {
rainbowRGB((meanw[hp]-minmw)/(maxmw-minmw+1e-9), &R, &G, &B);
fprintf(fras, "select %s\n\t\tcolor [%d, %d, %d]\n", sitelabel, R,G,B);
}
}
fclose(fsites); fclose(fras);
}
}
free(meanw);
if(com.model==0 && (com.NSsites==NSpselection || com.NSsites==NSbetaw)
&& (com.fix_omega!=1 || com.omega!=1)) /* BEB for M2 & M8 */
lfunNSsites_M2M8(frst, x, com.np);
if(!com.fix_omega && (com.model==2 || com.model==3) && com.NSsites==2) /* BEB for branchsite A & clade C */
lfunNSsites_AC(frst, x, com.np);
return (0);
}
#ifdef NSSITESBandits
void finishup(void)
{
/* Drops an empty marker file named "finished" in the working directory,
   signalling batch-run completion (NSSITESBandits builds only). */
FILE *marker = (FILE*)gfopen("finished","w");
fclose(marker);
}
#endif
/*
(*) Codon models for variable dN/dS ratios among sites
(com.nrate includes kappa & omega) (see also CDFdN_dS)
NSsites npara
0 one-ratio 0: one ratio for all sites
1 neutral 1: p0 (w0=0, w1=1)
2 selection 3: p0, p1, w2 (w0=0, w1=1)
3 discrete 2K-1: p0,p1,..., and w0,w1,...
4 freqs K: p's (w's are fixed)
5 gamma 2: alpha, beta
6 2gamma 4: p0, alpha1,beta1, alpha2=beta2
7 beta 2: p_beta, q_beta
8 beta&w 4: p0, p_beta, q_beta, w estimated
9 beta&gamma 5: p0, p_beta, q_beta, alpha, beta
10 beta&1+gamma 5: p0, p_beta, q_beta, alpha, beta (1+gamma used)
11 beta&1>normal 5: p0, p_beta, q_beta, mu, s (normal truncated w>1)
12 0&2normal 5: p0, p1, mu2, s1, s2
13 3normal 6: p0, p1, mu2, s0, s1, s2
14 M8a:beta&w=1 3: p0, p_beta, q_beta, w=1 fixed
15 M8a:beta&w>=1 4: p0, p_beta, q_beta, w>=1 estimated
NSsites = 14 forces change to fix_omega so we can't have 2 models in one run.
NSsites = 15 would not set omegab[0] correctly for the next tree.
(*) Codon models for variable dN/dS ratios among both branches and sites
(model=2, NSsites=3 or 2)
(com.nrate includes kappa & omega)
Parameters include branchlens, kappa, p0, p1, w0, w1, w2
method = 0: SetPSiteClass copies w's to nodes[].omega and PMat is calculated
in ConditionalPNode().
method = 1: PMat for branch of interest is calculated in lfuntdd_SiteClass().
The two types of branches have different Qfactor_NS: Qfactor_NS_branch[2].
August 2000.
(*) Codon (perhaps aa as well) site-class models
NSsites=3, ncatG=3 or 2 etc
aaDist:
1-6 for G1974,Miyata,c,p,v,a
FIT1 & FIT2 (11, 12): fitness model F_j = a_p*(p-p*)^2+a_v*(v-v*)^2
FIT1: w_ij = exp(F_j - F_i)
FIT2: w_ij = b*exp(F_j - F_i)
FIT1 & FIT2 are also implemented for NSsites=0
(*) Amino acid models
REVaa: The symmetrical part (S) of the rate matrix Q=S*PI are estimated,
making up 19*20/2-1=189 rate parameters for the matrix. The aa
frequencies are estimated using the observed ones. The Sij for
ijAAref=19*naa+9 (I-V) is set to one and others are relative rates;
REVaa_0: AA1STEP[i*(i+1)+j] marks the aa pair i & j that are
interchangeable. Sij for ijAAref=19*naa+9 (I-V) is set to one
and others are relative rates;
(*)
Codon & amino acid models
AAClasses: OmegaAA[i*(i-1)/2+j] marks the dN/dS ratio class for the pair
i & j. Note kappa is before omega's in x[].
OmegaAA[i*(i-1)/2+j]=-1, if AAs i & j are not interchangeable
=0, for the background ratio
=1,...,nclass for AAs i & j specified in OmegaAA.dat.
The total number of classes (com.nOmega) is one plus the number
specified in the file OmegaAAf.
com.nOmega is the number of different dN/dS ratios in the NSbranchB, NSbranch2 models
and in AAClasses.
nodes[].label marks the dN/dS ratio for the node in the NSbranchB NSbranch2 models
AA1STEP[i*(i-1)/2+j] =1 if AAs i & j differ at one codon position;
=0 otherwise.
(*) Codon and amino acid models
aaDist = -5,-4,-3,-2,-1,1,2,3,4,5:
Geometric and linear relationships between amino acid distance and
substitution rate:
wij = a*(1-b*dij/dmax)
wij = a*exp(-b*dij/dmax)
aaDist = 0:equal, +:geometric; -:linear, {1-5:Grantham,Miyata,c,p,v}
aaDist = 11, 12: fitness models, see above.
*/
#if 0 /* routines for testing codon-models */
int GetCategoryQc (char z[NS])
{
/* the category ID for a codon site with z[NS], transformed
classified into 19 categories
Category is built from the number of distinct bases observed at each
codon position; two or more distinct bases at position 2 go to the
catch-all class ncat-1.
*/
int i,j, icat, ncat=19, it, b[NS][3], nbase[3], markb[4];
puts("\nDo not work with missing data, GetCategoryQc.");
/* decode each sequence's codon into its three base indices */
for (j=0; j<com.ns; j++) {
it=FROM61[(int)z[j]]; b[j][0]=it/16; b[j][1]=(it/4)%4; b[j][2]=it%4;
}
/* nbase[i] = (number of distinct bases at codon position i) - 1 */
FOR (i,3) {
FOR (j,4) markb[j]=0;
FOR (j,com.ns) markb[b[j][i]]=1;
nbase[i]=markb[0]+markb[1]+markb[2]+markb[3]-1;
}
if(nbase[1]>=2) icat=ncat-1;
else {
/* cap positions 1 & 3 at 2 and combine into a base-3 index */
if(nbase[0]>2) nbase[0]=2; if(nbase[2]>2) nbase[2]=2;
icat = nbase[1]*9+nbase[0]*3+nbase[2];
}
return (icat);
}
int TestModelQc (FILE * fout, double x[])
{
/* Test the Qc model, slower than simulations
Compares observed site-pattern category counts (nobs[]) against expected
category probabilities (pexp[]) computed by enumerating all n^ns site
patterns and summing over internal-node states.
*/
char z[NS];
int h, npatt, it, icat, j, nodeb[NS], imposs;
int n=Nsensecodon, isum, nsum, ncat=19;
double fh, y, nobs[19], pexp[19], Pt[8][NCODE*NCODE];
puts("\nDo not work with missing data, GetCategoryQc.");
puts("\ntest Qc..\n");
/* observed counts per category from the data */
for (h=0,zero(nobs,ncat); h<com.npatt; h++) {
for (j=0; j<com.ns; j++) z[j]=com.z[j][h]-1;
icat = GetCategoryQc(z);
nobs[icat]+=com.fpatt[h];
}
FOR (j,ncat)
printf("cat #%4d: %4d%4d%4d%6.0f\n", j+1,j/9+1,(j/3)%3+1,j%3+1,nobs[j]);
if (com.ns>5 || com.alpha || com.ngene>1)
error2 ("TestModelQc: ns>5 || alpha>0.");
if (SetParameters (x)) puts ("\npar err..");
/* npatt = n^ns tip patterns; nsum = n^(internal nodes) summation states */
for (j=0,npatt=1; j<com.ns; j++) npatt*=n;
for (isum=0,nsum=1; isum<tree.nnode-com.ns; nsum*=n,isum++) ;
printf("\nTest Qc: npatt = %d\n", npatt);
/* transition matrices for every branch */
FOR (j, tree.nbranch)
PMatUVRoot (Pt[j], nodes[tree.branches[j][1]].branch,n,U,V,Root);
for (h=0,zero(pexp,ncat); h<npatt; h++) {
/* decode pattern index h into tip codon states nodeb[0..ns-1] */
for (j=0,it=h; j<com.ns; nodeb[com.ns-1-j]=it%n,it/=n,j++) ;
for (j=0,imposs=0; j<com.ns; j++)
{ z[j]=nodeb[j]; if (com.pi[(int)z[j]]==0) imposs=1; }
if (imposs) continue;
if ((icat=GetCategoryQc(z)) == ncat-1) continue;
if ((h+1)%100==0)
printf("\rTest Qc:%9d%4d%9.2f%%", h+1, icat, 100.*(h+1.)/npatt);
/* sum pattern probability over all internal-node state combinations */
for (isum=0,fh=0; isum<nsum; isum++) {
for (j=0,it=isum; j<tree.nbranch-com.ns+1; j++)
{ nodeb[com.ns+j]=it%n; it/=n; }
for (j=0,y=com.pi[nodeb[tree.root]]; j<tree.nbranch; j++)
y*=Pt[j][nodeb[tree.branches[j][0]]*n+nodeb[tree.branches[j][1]]];
fh += y;
}
if (fh<=0) {
matout (F0, x, 1, com.np);
printf ("\a\ntest Qc: h=%4d fh=%9.4f \n", h, fh);
}
pexp[icat]+=fh;
}
/* the skipped catch-all class absorbs the remaining probability mass */
pexp[ncat-1]=1-sum(pexp,ncat-1);
FOR (j,ncat)
fprintf(fout, "\ncat # %4d%4d%4d%4d%6.0f%10.5f%10.2f",
j+1, j/9+1, (j/3)%3+1, j%3+1, nobs[j], pexp[j], com.ls*pexp[j]);
return (0);
}
#endif
#if (DSDN_MC || DSDN_MC_SITES)
void SimulateData2s61(void)
{
/* This simulates two codon sequences and analyze using ML (GY94).
It generates site pattern freqs and then samples from them
to generate the seq data. Codons are coded as 0,1,...,60. There
is another routine of a similar name in the file dsdn.c where the
codons are coded as 0,1,...,63. The two routines should not be
mixed.
Note that com.pi[] is changed in the analysis but not reused to
calculate Efij[]
Ifdef (DSDN_MC_SITES), the data will be simulated with the NSsites models
but analysed assuming one omega for all sites, so the model is wrong.
Reads simulation parameters from infile, writes results to frst, and
calls exit() when done (never returns normally).
*/
char infile[32]="in.codon2s", seqfile[32]="codonseq.tmp",str[4]="";
FILE *fseq, *fin;
int ir,nr=100, ip, i,j,k,h, n=Nsensecodon;
int npatt0=n*(n+1)/2, nobs[NCODE*NCODE];
int il,nil, ls[50]={0,200,500};
double y, x[6]={1,1,1},xb[6][2], S,dN,dS,dNt,dSt,om,lnL, mr=0;
double t0,kappa0,omega0=.5,pi0[NCODE], mx[6],vx[6],mse[6]; /* t,k,w,dS,dN */
double Efij[NCODE*(NCODE+1)/2], space[50000];
com.icode=0; com.seqtype=1; com.ns=2;
com.ncode=n; com.cleandata=1; setmark_61_64 ();
for(j=0; j<com.ns; j++)
com.z[j] = (char*) malloc(npatt0*sizeof(char));
if(com.z[com.ns-1]==NULL) error2("oom z");
if((com.fpatt=(double*)malloc(npatt0*sizeof(double)))==NULL)
error2("oom fpatt");
for(j=0; j<3; j++) { xb[j][0]=.0001; xb[j][1]=99; }
#if (DSDN_MC_SITES)
strcpy(infile,"in.codon2sSites");
#endif
printf("\nTwo codon seq. simulation for ML (GY94), input from %s\n",infile);
fin=gfopen(infile,"r");
/* header: (ignored), #replicates, codon frequency model, #sequence lengths */
fscanf (fin,"%d%d%d%d", &k,&nr, &com.codonf, &nil);
SetSeed(-1, 0);
printf("\n%d replicates, %s model for analysis\nLc:",
nr, codonfreqs[com.codonf]);
for(il=0; il<nil; il++)
fscanf(fin, "%d", &ls[il+1]);
matIout(F0, ls+1, 1, nil);
/* read 64 codon frequencies, keeping only the sense codons */
for(i=0,k=0; i<NCODE; i++) {
fscanf(fin,"%lf",&y);
if(GeneticCode[com.icode][i]>-1) pi0[k++]=y;
else if(y!=0)
error2("stop codon freq !=0");
}
printf("sum pi = 1 = %.6f\n", sum(pi0,n));
/* loop over parameter sets until a negative t0 terminates the run */
for(ip=0; ip<99; ip++) {
fscanf(fin, "%lf%lf", &t0,&kappa0);
if(t0<0) exit(0);
printf("\n\nParameter set %d\nt0 =%.2f kappa0 =%.2f\n",ip+1,t0,kappa0);
fprintf(frst,"\n\nParameter set %d\nt0 =%.2f kappa0 =%.2f\n",ip+1,t0,kappa0);
for(j=0; j<n; j++) com.pi[j] = pi0[j];
com.ls=1;
#if (DSDN_MC_SITES)
com.NSsites=3;
fscanf(fin,"%d", &com.ncatG);
for(i=0; i<com.ncatG; i++) fscanf(fin,"%lf", &com.freqK[i]);
for(i=0; i<com.ncatG; i++) fscanf(fin,"%lf", &com.rK[i]);
printf("\nSite classe model (K=%d)\np: ",com.ncatG);
for(i=0; i<com.ncatG; i++)
printf("%7.4f",com.freqK[i]);
printf("\nw: "); FOR(i,com.ncatG) printf("%7.4f",com.rK[i]); FPN(F0);
fprintf(frst,"\nSite classe model (K=%d)\np: ",com.ncatG);
for(i=0; i<com.ncatG; i++)
fprintf(frst,"%7.4f",com.freqK[i]);
fputs("\nw: ",frst); FOR(i,com.ncatG) fprintf(frst,"%7.4f",com.rK[i]); FPN(frst);
if(1-sum(com.freqK,com.ncatG))
error2("freqs do not sum to 1");
/* true dS & dN: weighted averages over the site classes */
for(j=0,Qfactor_NS=0,dS=dN=0; j<com.ncatG; j++) {
freqK_NS = com.freqK[j];
EigenQcodon(2,1,&S,&dSt,&dNt,NULL,NULL,NULL, &mr, &kappa0,com.rK[j],PMat);
dS += freqK_NS*dSt;
dN += freqK_NS*dNt;
}
Qfactor_NS = 1/Qfactor_NS;
om = (dS>0?dN/dS:-1);
dS *= t0*Qfactor_NS;
dN *= t0*Qfactor_NS;
#else
fscanf(fin,"%lf", &omega0);
EigenQcodon(2,t0,&S,&dS,&dN, NULL,NULL,NULL, &mr, &kappa0,omega0,space);
om=omega0;
#endif
printf("\nCorrect values");
printf("\nS%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,om);
fprintf(frst,"\nCorrect values");
fprintf(frst,"\nS%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,om);
/* calculate Efij[], the site pattern probabilities */
FOR(j,n) com.pi[j]=pi0[j];
#if (DSDN_MC_SITES)
com.NSsites=3;
for(k=0,zero(Efij,npatt0); k<com.ncatG; k++) {
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, &kappa0,com.rK[k],PMat);
PMatUVRoot(PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) {
y=com.pi[i]*PMat[i*n+j];
Efij[h++] += (i==j?y:2*y) * com.freqK[k];
}
}
com.NSsites=0;
#else
EigenQcodon(1,-1,NULL,NULL,NULL,Root, U, V, &mr, &kappa0, omega0, PMat);
PMatUVRoot (PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) { /* why for each il? */
y=com.pi[i]*PMat[i*n+j];
Efij[h++]=(i==j?y:2*y);
}
#endif
/* initialize the full pattern set: every (unordered) codon pair once */
for(i=h=0,com.ls=1,com.npatt=npatt0;i<n;i++) for(j=0;j<=i;j++) {
com.z[0][h]=(char)i; com.z[1][h]=(char)j;
com.fpatt[h]=Efij[h]; h++;
}
if(fabs(1-sum(Efij,npatt0))>1e-6) error2("sum Efij != 1");
/* il==0 (ls[0]==0) means "infinite data": fit to the exact Efij[] */
for(il=0; il<nil+1; il++) {
com.ls=ls[il];
if(com.ls==0) {
puts("\nML estimates from infinite data");
com.ls=1;
x[0]=t0*rndu(); x[1]=kappa0; x[2]=omega0*rndu();
GetCodonFreqs2 ();
ming2(NULL,&lnL,lfun2dSdN,NULL,x,xb, space,1e-10,3);
printf("lnL = %.6f\n",-lnL);
EigenQcodon(2,x[0],&S,&dS,&dN, NULL,NULL,NULL, &mr, &x[1],x[2],space);
printf("S%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,x[2]);
fprintf(frst,"ML estimates from infinite data\nt=%7.4f k=%7.4f",x[0],x[1]);
fprintf(frst," S%%=%7.4f dS=%7.4f dN=%7.4f w=%7.4f\n",S/3,dS,dN,x[2]);
/* convert Efij[] to cumulative probabilities for MultiNomial() below */
for(h=1;h<npatt0; h++) Efij[h]+=Efij[h-1];
puts("\nt & k & w & dS & dN");
fputs("\nLc & t & k & w & dS & dN\n",frst); fflush(frst);
continue;
}
printf("\nls = %d\n", com.ls);
/* finite-data replicates: sample patterns, re-estimate, track moments */
for(ir=0,zero(mx,6),zero(vx,6),zero(mse,6); ir<nr; ir++) {
MultiNomial(com.ls, npatt0, Efij, nobs, NULL);
for(i=0,com.npatt=0,zero(com.pi,n);i<n;i++) for(j=0;j<=i;j++)
if(nobs[k=i*(i+1)/2+j]) {
com.z[0][com.npatt]=i; com.z[1][com.npatt]=j;
com.fpatt[com.npatt++]=nobs[k];
}
for(i=0,zero(com.pi,n); i<com.npatt; i++) {
com.pi[com.z[0][i]]+=com.fpatt[i]/(2.*com.ls);
com.pi[com.z[1][i]]+=com.fpatt[i]/(2.*com.ls);
}
GetCodonFreqs2 ();
x[0]=t0; x[1]=kappa0; x[2]=omega0;
/* printf("\nlnL=%9.6f\n",-lfun2dSdN(x,3)); */
ming2((noisy?F0:NULL),&lnL,lfun2dSdN,NULL,x,xb, space,1e-7,3);
EigenQcodon(2,x[0],&S,&x[3],&x[4], NULL,NULL,NULL, &mr, &x[1],x[2],space);
FOR(j,5) {
vx[j] += (x[j]-mx[j])*(x[j]-mx[j]);
mx[j] = (mx[j]*ir+x[j])/(ir+1.);
}
mse[0]+=square(x[2]-omega0);
printf("\r%4d%8.4f%8.4f%8.4f %8.4f%8.4f%8.4f%8.4f%8.4f",
ir+1,x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
#if 0
if(ir==9) {
fseq=gfopen(seqfile,"w");
fprintf(fseq,"%6d %6d\n", com.ns,com.ls*3);
for(i=0;i<2;i++,FPN(fseq),fflush(fseq)) {
fprintf(fseq,"seq.%-5d ", i+1);
FOR(h,com.npatt) FOR(k,(int)com.fpatt[h])
fprintf(fseq,"%s", getcodon(str,FROM61[com.z[i][h]]));
}
fclose(fseq); exit(0);
}
#endif
}
if(nr>1) { FOR(j,5) vx[j]=sqrt(vx[j]/(nr-1.)/nr); mse[0]=sqrt(mse[0]/nr); }
fprintf(frst,"%4d ", com.ls);
FOR(i,5) fprintf(frst,"%7.4f +%7.4f", mx[i],vx[i]); FPN(frst);
} /* for (ii) */
} /* for(ip) */
exit(0);
}
void Ina(void)
{
/* This simulates two codon sequences and analyze them using Ina's method.
Ina's program is modified to output result in Ina1.tmp. Consistency
analysis is done by generating long sequences.
Note that com.pi[] is not changed in the analysis, which is done outside
the program.
Writes simulated data to codonseq.tmp, invokes the external Ina1 program
via system(), reads its estimates back from Ina1.tmp, and calls exit()
when t0<0 is entered (never returns normally).
*/
char seqfile[32]="codonseq.tmp",tmpfile[32]="Ina1.tmp",str[4]="";
FILE *fseq, *ftmp;
int ip,ir,nr=500, i,j,k,h, n=Nsensecodon;
int npatt0=n*(n+1)/2, nobs[NCODE*NCODE];
int il,nil=1, ls[]={500,100,200,300,400,500,600,800,1000}, fcodon=1;
double y, t=.5,f3x4[12], x[3]={1,1,1}, S,dS,dN, mr=0;
double t0=1,kappa0=1,omega0=1, mx[6],vx[6],mse[6]; /* t,k,w,dS,dN */
double Efij[NCODE*NCODE], space[50000];
/* preset position-specific base frequency tables, selected by fcodon */
double f3x4_data[][3*4]={
{0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25},
{0.20517, 0.28293, 0.30784, 0.20406, /* mt pri */
0.40979, 0.27911, 0.18995, 0.12116,
0.15105, 0.43290, 0.37123, 0.04482},
{0.19020, 0.16201, 0.36655, 0.28124, /* hiv */
0.28889, 0.18805, 0.30179, 0.22127,
0.24875, 0.16894, 0.36822, 0.21410},
{0.14568, 0.24519, 0.33827, 0.27086,
0.35556, 0.18765, 0.24049, 0.21630,
0.26444, 0.25728, 0.21012, 0.26815} /* lysinNew*/
};
puts("\nThis simulates data and analyses them using Ina95.");
printf ("fcodon? ");
scanf ("%d", &fcodon);
FOR(j,12) f3x4[j]=f3x4_data[fcodon][j];
/* codon frequencies from the F3x4 products, renormalized over sense codons */
for(j=0,h=0,y=0; j<64; j++) {
if (GeneticCode[com.icode][j]==-1) continue;
com.pi[h]=f3x4[j/16]*f3x4[4+(j%16)/4]*f3x4[8+j%4];
y+=com.pi[h++];
}
FOR(j,n) com.pi[j]/=y;
printf("fcodon: %d\n",fcodon);
matout(frst,f3x4,3,4);
com.icode=0; com.seqtype=1; com.ns=2; com.ls=1; npatt0=n*(n+1)/2;
com.ncode=n; setmark_61_64 ();
FOR(j,com.ns) com.z[j]=(char*) malloc(npatt0*sizeof(char));
if(com.z[com.ns-1]==NULL) error2 ("oom z");
if((com.fpatt=(double*)malloc(npatt0*sizeof(double)))==NULL)
error2("oom fpatt");
printf("\nInfinite sequences.\nsum pi=1=%.6f\n",sum(com.pi,NCODE));
noisy=0; FOR(i,6) x[i]=0;
/* loop over interactively-entered parameter sets; t0<0 terminates */
FOR(ip,99) {
printf("\nt0 & kappa0 & omega0? ");
scanf("%lf%lf%lf", &t0,&kappa0,&omega0);
if(t0<0) exit(0);
printf("t0 =%.2f & kappa0 =%.2f & omega0 =%.2f\n",t0,kappa0,omega0);
fprintf(frst, "\nt & k & w: %8.2f%8.2f%8.2f\n\n", t0,kappa0,omega0);
EigenQcodon(2,t0,&S,&dS,&dN, NULL,NULL,NULL, &mr, &kappa0,omega0,space);
fprintf(frst,"\nS/(S+N)=%7.4f dS=%7.4f dN=%7.4f\n",S/3,dS,dN);
fputs("Lc & t & k & w & dS & dN\n",frst);
/* exact pair-pattern probabilities Efij[] under the true parameters */
EigenQcodon(1,-1,NULL,NULL,NULL,Root, U, V, &mr, &kappa0, omega0, PMat);
PMatUVRoot (PMat, t0, n, U, V, Root);
for(i=0,h=0;i<n;i++) for(j=0;j<=i;j++) {
y=com.pi[i]*PMat[i*n+j];
Efij[h++]=(i==j?y:2*y);
}
for(i=h=0,com.ls=1,com.npatt=npatt0;i<n;i++) for(j=0;j<=i;j++) {
com.z[0][h]=(char)i; com.z[1][h]=(char)j;
com.fpatt[h]=Efij[h]; h++;
}
/* cumulative probabilities for MultiNomial() sampling */
for(h=1;h<npatt0; h++) Efij[h]+=Efij[h-1];
if(fabs(1-Efij[npatt0-1])>1e-6) puts("Sum p_ij != 1.");
for(il=0; il<nil; il++) {
com.ls=ls[il];
printf("\nls = %d\n", com.ls);
for(ir=0,zero(mx,6),zero(vx,6),zero(mse,6); ir<nr; ir++) {
printf("\r%4d", ir+1);
MultiNomial (com.ls, npatt0, Efij, nobs, NULL);
for(i=0,com.npatt=0;i<n;i++) for(j=0;j<=i;j++)
if(nobs[k=i*(i+1)/2+j]) {
com.z[0][com.npatt]=i; com.z[1][com.npatt]=j;
com.fpatt[com.npatt++]=nobs[k];
}
/* write the sampled alignment for the external Ina1 program */
fseq=gfopen(seqfile,"w");
fprintf(fseq,"> %6d %6d\n", com.ns,com.ls*3);
for(i=0;i<2;i++,FPN(fseq),fflush(fseq)) {
fprintf(fseq,"seq.%-5d ", i+1);
FOR(h,com.npatt) FOR(k,(int)com.fpatt[h])
fprintf(fseq,"%s", getcodon(str,FROM61[com.z[i][h]]));
}
fclose(fseq);
if(com.ls>2000) system("Ina1Large codonseq.tmp >t");
else system("Ina1 codonseq.tmp >t");
/* read Ina's three estimates back */
ftmp=gfopen(tmpfile,"r");
if(fscanf(ftmp,"%lf%lf%lf",&x[0],&x[1],&x[2]) !=3)
error2("reading tmpf");
fclose(ftmp);
FOR(j,5) {
vx[j] += (x[j]-mx[j])*(x[j]-mx[j]);
mx[j] = (mx[j]*ir+x[j])/(ir+1.);
}
mse[0]+=square(x[0]-omega0);
printf("%7.4f%7.4f%7.4f %7.4f%7.4f%7.4f%7.4f%7.4f",
x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
/* fprintf(frst1,"%7.4f%7.4f%7.4f %7.4f%7.4f%7.4f%7.4f%7.4f\n",
x[0],x[1],x[2],mx[0],mx[1],mx[2],mx[3],mx[4]);
*/
}
if(nr>1) { FOR(j,5) vx[j]=sqrt(vx[j]/(nr-1.)/nr); mse[0]=sqrt(mse[0]/nr); }
fprintf(frst,"%4d ", com.ls);
FOR(i,5) fprintf(frst,"%7.3f +%7.4f", mx[i],vx[i]);
FPN(frst); fflush(frst);
fprintf(frst1,"%6d%6d %7.2f%7.2f%7.2f: %8.3f +%7.3f\n",
com.ls,nr, t0,kappa0,omega0, mx[0],mse[0]);
fflush(frst1);
} /* for (il) */
} /* for (ip) */
exit(0);
}
#endif
#if 0
int mergeSeqs(FILE*fout)
{
/* This concatenate multiple genes (data sets) for the same set of species
into one file of a long gene. Used to process Anne Yoders' alignment.
Reads the hard-coded *.fin files, verifies species names match across
files, and writes one concatenated multi-gene alignment to fout.
*/
char *filenames[12]={"NADH1.fin","NADH2.fin","COI.fin","COII.fin","ATPase8.fin",
"ATPase6.fin","COIII.fin","NADH3.fin","NADH4L.fin","NADH4.fin",
"NADH5.fin", "Cytb.fin"};
int ns0=32, nfile=12, ifile, ls0, lswhole=20000, i,h, lgene0[32];
char *z0[32], *spname0[32]={"Artibeus", "B.musculus", "B.physalus", "Bos",
"Canis", "Cavia", "Ceratother", "Dasypus", "Didelphis", "E.asinus",
"E.caballus","Erinaceus", "Felis", "Glis", "Gorilla", "Halichoeru", "Homo",
"Hylobates", "Macropus", "Mus", "Ornithorhy", "Oryctolagu", "Ovis",
"P.paniscus", "P.troglody", "Papio", "Phoca", "P.abelii",
"P.pygmaeus", "Rattus", "Rhinoceros", "Sus"};
FILE *fseq;
noisy=0;
/* per-species buffers for the concatenated sequences */
FOR(i,ns0) if((z0[i]=(char*)malloc(lswhole*sizeof(char)))==NULL)
error2("oom z");
for(ifile=0,ls0=0; ifile<nfile; ifile++) {
printf("Reading data set %2d/%2d (%s)", ifile+1,nfile,filenames[ifile]);
fseq=gfopen (filenames[ifile],"r");
ReadSeq(NULL,fseq,1);
/* lgene0 stores gene length in codons; com.ls converted to sites (x3) */
lgene0[ifile]=com.ls; com.ls*=3;
FOR(i,ns0) if(strcmp(spname0[i],com.spname[i])) error2("spname different");
FOR(i,ns0) FOR(h,com.ls) z0[i][ls0+h]=com.z[i][h];
ls0+=com.ls;
printf(" + %5d = %5d\n", com.ls, ls0);
}
/* header line with the gene-length option G, then one line per species */
fprintf(fout,"%6d %6d G\nG %4d ", ns0,ls0,nfile);
FOR(ifile,nfile) fprintf(fout, " %4d", lgene0[ifile]); FPN(fout);
for(i=0;i<ns0;i++,FPN(fout)) {
fprintf(fout,"%-12s ", spname0[i]);
FOR(h,ls0) {
fprintf(fout,"%c", z0[i][h]);
if((h+1)%3==0) fprintf(fout," ");  /* space between codons */
}
}
return(0);
}
#endif
int SlidingWindow(FILE*fout, FILE* fpair[], double space[])
{
/* sliding window analysis, clean data, 2 sequences only
For each window of windowsize0 sites (stepping by offset0) it runs
PairwiseCodon() twice -- with omega fixed at 1 and with omega free --
and declares positive selection when the estimated omega>1 and the
likelihood-ratio statistic 2*(lnL1-lnL0) exceeds 2.71 (chi2, 5% one-sided).
Stops at the first such window. Global state (com.ls, com.npatt,
com.fpatt, com.pi, com.z) is saved on entry and restored before return.
Returns 1 if a positive window was found, else 0.
*/
int wlen=windowsize0, offset=offset0, wstart, n=com.ncode, j, h, positive=0;
int ls0=com.ls, npatt0=com.npatt;
char *z0[NS];
double *fpatt0, pi0[NCODE], lnL0=0, lnL1=0;
if(com.seqtype!=1) error2("implemented for codon sequences only.");
if(com.runmode!=-2) error2("this version of sliding windows requires runmode=-2");
if(!com.cleandata || com.ngene>1)
error2("clean data & one gene only for sliding window analysis");
if(com.print)
error2("Choose RateAncestor=0 for sliding window analysis");
/* save the full-data state that the window loop overwrites */
for(j=0; j<com.ns; j++)
z0[j] = com.z[j];
for(j=0; j<com.ns; j++)
if((com.z[j]=malloc(npatt0*sizeof(char)))==NULL) error2("oom z");
if((fpatt0=(double*)malloc(npatt0*sizeof(double)))==NULL) error2("oom fp");
for(h=0; h<com.npatt; h++)
fpatt0[h] = com.fpatt[h];
for(j=0; j<n; j++)
pi0[j] = com.pi[j];
for (wstart=0; wstart+wlen<=ls0; wstart+=offset) {
/* pattern counts restricted to the sites inside this window */
for(h=0; h<npatt0; h++)
com.fpatt[h] = 0;
for(h=wstart; h<wstart+wlen; h++)
com.fpatt[com.pose[h]]++;
/* compact to the patterns actually present in the window */
for(h=0,com.npatt=0,zero(com.pi,n); h<npatt0;h++) if(com.fpatt[h]>0) {
for(j=0; j<com.ns; j++)
com.z[j][com.npatt] = z0[j][h];
com.fpatt[com.npatt] = com.fpatt[h];
com.npatt++;
}
com.ls = wlen;
com.posG[0] = 0; com.posG[1] = com.npatt;
/* null model: omega fixed at 1 */
com.fix_omega = 1; com.omega = 1;
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
lnL0 = -lnLmodel; /* lnLmodel passed overhead from PairwiseCodon() */
/* alternative model: omega estimated */
com.fix_omega = 0; com.omega = 0.5;
PairwiseCodon(fout,fpair[3],fpair[4],fpair[5],com.space);
lnL1 = -lnLmodel;
if(com.omega>1 && (lnL1-lnL0)>2.71/2) {
positive = 1;
break;
}
if(noisy)
printf("sites %3d -- %3d (%d) npatt:%4d w=%.4f\n",wstart+1,wstart+wlen,ls0,com.npatt, com.omega);
fprintf(fout,"\nsites %3d -- %3d %4d",wstart+1,wstart+wlen,com.npatt);
/* Forestry(fout); */
}
fprintf(frst1, " %2d", positive);
printf(" %2d", positive);
/* restore the full-data state saved on entry */
com.ls = ls0; com.posG[1] = com.npatt = npatt0;
for(h=0; h<com.npatt; h++)
com.fpatt[h] = fpatt0[h];
xtoy(pi0, com.pi, n);
free(fpatt0);
for(j=0; j<com.ns; j++) {
free(com.z[j]);
com.z[j] = z0[j];
}
return(positive);
}
void Get4foldSites(void)
{
/* This collects the four-fold degerate sites into a file named 4fold.nuc.
The data are not coded yet, and the routine is called from ReadSeq().
*/
int ls4, j,k,h, ib[3][4], nb[3];
char file4[12]="4fold.nuc", *mark4;
FILE *f4;
f4=gfopen(file4,"w");
if ((mark4=(char*)malloc(com.ls*sizeof(char)))==NULL) error2("oom mark");
FOR(h,com.ls) mark4[h]=0;
for (h=0,ls4=0; h<com.ls; h++) {
for(k=0; k<3; k++)
NucListall(com.z[0][h*3+k], &nb[k], ib[k]);
if(nb[0]==1 && nb[2]==1 && FourFold[ib[0][0]][ib[1][0]]) {
for(j=1; j<com.ns; j++)
for(k=0; k<2; k++) if(com.z[j][h*3+k]!=com.z[0][h*3+k]) goto nextsite;
mark4[h]=1; ls4++;
}
nextsite: ;
} /* for(h) */
fprintf (f4, "%6d %6d\n", com.ns, ls4);
for (j=0; j<com.ns; j++) {
fprintf (f4, "\n%s\n", com.spname[j]);
for (h=0; h<com.ls; h++)
if(mark4[h]) fprintf (f4, "%c", com.z[j][h*3+2]);
FPN (f4);
}
fprintf(f4, "\n\ncodons included\n");
for(h=0; h<com.ls; h++)
if(mark4[h]) fprintf(f4, " %2d", h+1);
FPN(f4);
fclose(f4); free(mark4);
}
double distanceHKY85 (double x[], double *kappa, double alpha);
void d4dSdN(FILE* fout)
{
/* This looks at the 4-fold degerenate sites.
*/
char str1[4]=" ", str2[4]=" ";
int i,j,k, n=com.ncode, b[2][3], ic1,ic2,iaa;
double pS4,d4,kappa4fold;
double fij, fij4f[4*4], pi4f[4], pstop,t, S,dS,dN,dN_dS, mr=0;
double fb3x4[12]={.25, .25, .25, .25,
.25, .25, .25, .25,
.25, .25, .25, .25};
int nii=18, ii;
double t0[]={0.001, 0.01,0.05, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 1.2, 1.5, 2,2.5,3};
com.ls=1; com.kappa=3; com.omega=1;
fb3x4[0*4+0]=0.35;
fb3x4[0*4+1]=0.15;
fb3x4[0*4+2]=0.35;
fb3x4[0*4+3]=0.15;
/*
fb3x4[1*4+0]=0.35;
fb3x4[1*4+1]=0.15;
fb3x4[1*4+2]=0.35;
fb3x4[1*4+3]=0.15;
*/
fb3x4[2*4+0]=0.35;
fb3x4[2*4+1]=0.15;
fb3x4[2*4+2]=0.35;
fb3x4[2*4+3]=0.15;
printf("\tt\tS\tdS\tdN\tdN/dS\tS4\td4\tk_4f\tpT_4f\n");
zero(com.pi,64);
FOR(k,64) if(FROM64[k]>-1)
com.pi[FROM64[k]]=fb3x4[k/16]*fb3x4[4+(k/4)%4]*fb3x4[8+k%4];
pstop=1-sum(com.pi,n);
abyx(1/(1-pstop),com.pi,n);
EigenQcodon(1,-1,NULL,NULL,NULL,Root,U,V, &mr, &com.kappa,com.omega,PMat);
matout(frst,com.pi,16,4);
FOR(ii,nii) {
t=t0[ii];
EigenQcodon(2,t,&S,&dS,&dN,NULL,NULL,NULL, &mr, &com.kappa,com.omega,PMat);
PMatUVRoot (PMat, t, n, U, V, Root);
if(testTransP(PMat,n)) error2("testP");
matout(frst,PMat,n,n);
for(i=0,zero(fij4f,16);i<n;i++) {
ic1=FROM61[i]; b[0][0]=ic1/16; b[0][1]=(ic1/4)%4; b[0][2]=ic1%4;
iaa=GeneticCode[com.icode][ic1];
ic1-=b[0][2];
FOR(k,4) if(GeneticCode[com.icode][ic1+k]!=iaa) break;
if(k<4) continue;
FOR(j,n) {
fij=com.pi[i]*PMat[i*n+j];
ic2=FROM61[j]; b[1][0]=ic2/16; b[1][1]=(ic2/4)%4; b[1][2]=ic2%4;
if(b[0][0]!=b[1][0] || b[0][1]!=b[1][1]) continue;
fij4f[b[0][2]*4+b[1][2]] += fij;
/* printf("%c %s %s %.8f\n",AAs[iaa],getcodon(str1,ic1+b[0][2]),getcodon(str2,ic2),fij);
*/
}
}
pS4=sum(fij4f,16)/3;
abyx(1/sum(fij4f,16),fij4f,16);
FOR(k,4) pi4f[k]=sum(fij4f+k*4,4);
/* matout(F0,fij4f,4,4); */
d4 = distanceHKY85 (fij4f, &kappa4fold, 0);
dN_dS = (dS>0 ? dN/dS : -1);
printf("\t%.4f\t%.5f\t%.5f\t%.5f\t%.5f\t%.3f\t%.5f\t%.3f\t%.4f\n",
t,S/3,dS,dN,dN_dS, pS4,d4,kappa4fold,pi4f[0]);
}
printf("\nproportion of stop codons: %.4f\n", pstop);
exit(0);
}
double distanceHKY85 (double x[], double *kappa, double alpha)
{
/* This is from SeqDivergence(), copied here to avoid linking to SeqDivergence.
*/
int i,j;
double p[4], Y,R, a1,a2,b, P1,P2,Q,tc,ag;
double largek=999, larged=9;
if (testXMat(x) && noisy) {
matout(F0,x,4,4);
puts("X err.. Perhaps no sites to compare?");
}
*kappa=0;
for (i=0,zero(p,4); i<4; i++) {
FOR (j,4) { p[i]+=x[i*4+j]/2; p[j]+=x[i*4+j]/2; }
}
P1=x[0*4+1]+x[1*4+0];
P2=x[2*4+3]+x[3*4+2];
Q = x[0*4+2]+x[0*4+3]+x[1*4+2]+x[1*4+3]+ x[2*4+0]+x[2*4+1]+x[3*4+0]+x[3*4+1];
Y=p[0]+p[1];
R=p[2]+p[3];
if(P1+P2+Q<1e-100) {
*kappa=-1; return(0);
}
tc=p[0]*p[1];
ag=p[2]*p[3];
a1=1-Y*P1/(2*tc)-Q/(2*Y);
a2=1-R*P2/(2*ag)-Q/(2*R);
b=1-Q/(2*Y*R);
if (a1<=0 || a2<=0 || b<=0) return (larged);
if (alpha<=0) { a1=-log(a1); a2=-log(a2); b=-log(b); }
else { a1=-gammap(a1,alpha); a2=-gammap(a2,alpha); b=-gammap(b,alpha);}
a1 = -R/Y*b + a1/Y;
a2 = -Y/R*b + a2/R;
if (b>0) *kappa = min2((a1+a2)/(2*b), largek);
return 2*(p[0]*p[1] + p[2]*p[3])*(a1+a2)/2 + 2*Y*R*b;
}
void get_pclassM_iw_M2M8(int *iw, double *pclassM,
int iclassM, int ip[], double para[4][100], int n1d, int M2a, int ternary);
void get_grid_para_like_M2M8(double para[4][100], int n1d, int dim, int M2a, int ternary,
double p0b[], double p1b[], double w0b[], double wsb[],
double p_beta_b[], double q_beta_b[], double x[], double *S);
void GetIndexTernary(int *ix, int *iy, double *x, double *y, int itriangle, int K);
void get_grid_para_like_M2M8 (double para[4][100], int n1d, int dim, int M2a, int ternary,
double p0b[], double p1b[], double w0b[], double wsb[],
double p_beta_b[], double q_beta_b[], double x[], double *S)
{
/* This sets up the grid (para[][]) according to the priors. It also copies all
possible w values into com.rK[].
The bounds on parameters are used to set up the uniform priors for parameters.
*/
int i,k,h, site=10;
double fh;
if(com.NSsites==NSbetaw) /* can't control the range of w from the beta */
{ w0b[0]=0; w0b[1]=1; }
for(i=0; i<n1d; i++) {
para[0][i] = p0b[0]+(i+0.5)*(p0b[1]-p0b[0])/n1d; /* p0 */
if(com.NSsites==2) { /* M2 & M2a */
para[1][i] = p1b[0]+(i+0.5)*(p1b[1]-p1b[0])/n1d; /* p1 */
if(ternary) para[0][i] = para[1][i] = -1;
if(M2a)
para[2][i] = w0b[0]+(i+0.5)*(w0b[1]-w0b[0])/n1d; /* w0 */
para[2+M2a][i] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* w2 */
}
else { /* M8 */
para[1][i] = p_beta_b[0]+(i+0.5)*(p_beta_b[1]-p_beta_b[0])/n1d; /* p */
para[2][i] = q_beta_b[0]+(i+0.5)*(q_beta_b[1]-q_beta_b[0])/n1d; /* q */
para[3][i] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* ws */
}
}
k=0;
if(com.NSsites==2 && M2a==0)
com.rK[k++]=0;
else /* w0 in M2a or w0 from beta in M8 */
for(i=0; i<n1d; i++)
com.rK[k++] = w0b[0]+(i+0.5)*(w0b[1]-w0b[0])/n1d;
if(com.NSsites==2)
com.rK[k++]=1; /* w1 for M2 & M2a */
for(i=0; i<n1d; i++)
com.rK[k++] = wsb[0]+(i+0.5)*(wsb[1]-wsb[0])/n1d; /* w2 in M2 or ws */
/* calculates the likelihood com.fhK[] */
printf("\nCalculating f(x_h|w): %d categories %d w sets.\n", n1d, com.ncatG);
com.conPSiteClass=0; *S=0;
fx_r(x,-1);
if(noisy>3)
for(k=0; k<com.ncatG; k++)
printf("S%d w log{f(x|w)}: %9.4f %12.6f\n",
site,com.rK[k], (com.NnodeScale?com.fhK[k*com.npatt+site]:log(com.fhK[k*com.npatt+site])));
if(com.NnodeScale)
for(h=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] = exp(com.fhK[k*com.npatt+h]-fh);
*S += fh*com.fpatt[h];
}
else
for(h=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] /= fh;
*S += log(fh)*com.fpatt[h];
}
}
void get_pclassM_iw_M2M8(int *iw, double *pclassM,
int iclassM, int ip[], double para[][100], int n1d, int M2a, int ternary)
{
/* Given the point on the grid (ip[]), this returns iw and pclassM, where iw
locates the w ratio in com.rK[] and f(x_h|w) stored in com.fhK[],
and pclassM is the proportion of the site class under the model.
Look at get_grid_para_like() for more info about the setup of com.rK[], which
accounts for the setup of iw here in this function.
M8 used to use 10 categories to approximate the beta, each of probability
10%. Here we use n1d categories, equally spaced, and the
probabilities for categories are calculated using CDFBeta.
Parameters for grid integration:
Parameters Parameter dependence
Model 0 1 2 3 iw pclassM
-------------------------------------------------------------------
M2 p0 p1 w2 iclassM w0 w2 iclassM p0 p1
M2a p0 p1 w0 w2 iclassM w2 iclassM p0 p1
M8 p0 p q ws iclassM p q ws iclassM p0 p q
-------------------------------------------------------------------
If M2 or M2a and ternary, the n1d*n1d grid for p0-p1 is mapped onto the
triangle specified by p0-p1-p2. First the index i and j are retrieved
from the label for the point (ip[0]*n1d+ip[1]). Then the coordinates
p0 and p1 at the point is worked out. With this scheme, p0 and p1 each
takes on 2*n1d-1 possible values.
*/
int i,j;
double p0,p1, p,q, cdf0=0,cdf1=1;
if(com.NSsites==NSpselection) { /* M2 & M2a */
if(ternary) {
GetIndexTernary(&i, &j, &p0, &p1, ip[0]*n1d+ip[1], n1d);
*pclassM = (iclassM==0 ? p0 : (iclassM==1 ? p1 : 1-p0-p1));
}
else {
if(iclassM<2) *pclassM = para[iclassM][ip[iclassM]]; /* p0 or p1 */
else *pclassM = 1-para[0][ip[0]]-para[1][ip[1]]; /* p2 */
*pclassM = max2(*pclassM,0);
}
if(M2a==0) { /*M2 */
if(iclassM<2) *iw = iclassM; /* w0 or w1 */
else *iw = 2+ip[2]; /* w2 */
}
else { /* M2a */
if(iclassM==0) *iw = ip[2]; /* w0 */
else if(iclassM==1) *iw = n1d; /* w1 */
else *iw = n1d+1+ip[3]; /* w2 */
}
}
else { /* M8 */
p0 = para[0][ip[0]];
if(iclassM<n1d) { /* w from beta */
p = para[1][ip[1]];
q = para[2][ip[2]];
if(iclassM>0) cdf0 = CDFBeta(iclassM/(double)n1d, p, q, 0);
if(iclassM<n1d-1) cdf1 = CDFBeta((iclassM+1.0)/n1d, p, q, 0);
*pclassM = p0*(cdf1-cdf0);
*iw = iclassM;
}
else { /* ws */
*pclassM = 1-p0;
*iw = n1d+ip[3];
}
}
}
int lfunNSsites_M2M8 (FILE* frst, double x[], int np)
{
/* Bayes empirical Bayes (BEB) correction for the posterior of w for each site
under M2 or M8. The integral is 3-d for M2, and 4-d for M2a or M8,
approximated using n1d=10 categories in each dimension. The ngrid=n1d^dim
points make up the grid.
com.ncatG is the number of all possible w's ever needed. They are copied
into com.rK[], to be used to calculate f(x_h|w), stored in com.fhK[], before
entering the grid of 4-d integration. iw[ngrid*nclassM] identifies the
position of w in com.rK[], and pclassM[ngrid*nclassM] is the proportion
of sites under the model. Those are set up in get_pclassM_iw().
The priors are set up in get_grid_para_like(). See notes there.
Some control variables:
M2a=1 for M2a=0 for M2.
ternary=1: use ternary triangles to specify prior for p0-p1 under M2 or M2a
=0: break p0 and p1 into 10 bins and skip the unfeasible points
Parameters and their priors are as follows:
M2 (p0 p1 w2) : p0,p1~U(0,1), w2~U(1,11)
M2a(p0 p1 w0 w2): p0,p1~U(0,1), w0~U(0,1), w2~U(1,11)
M8 (p0 p q ws): p0~U(0,1), p,q~U(0,2), ws~U(1,11)
Ziheng, Copenhagen, 17 May 2004.
*/
int n1d=10, M2a=1, ternary=1, trianglePriorM8=0;
double p0b[]={0,1}, p1b[]={0,1}, w0b[]={0,1}; /* w0b for M2a only. */
double wsb[]={1,11}; /* for w2 in M2 & M2a, or for ws in M8 */
double p_beta_b[]={0,2}, q_beta_b[]={0,2};
int dim=(com.NSsites==8||M2a?4:3), ngrid,igrid, ip[4]={0}, j,k,h, it;
int refsp=0, ncatG0=com.ncatG;
/* # of site classes under model and index for site class */
int nclassM = (com.NSsites==NSpselection?3:n1d+1), iclassM, *iw;
double para[4][100]={{0}}, postpara[4][100]; /* paras on grid for 4-d integral: n1d<=100! */
/* lnfXs is log of term in equation 5 in YWN05, which sums over those terms. */
double fh, fX, *lnfXs,S1,S2, *lnprior, *pclassM, *meanw, *varw, *postSite, *postp0p1=NULL;
double fh1site, t,v;
char timestr[32], *paras[4];
printf("\nBEBing (dim = %d). This may take several minutes.", dim);
if(com.NSsites==8) { paras[0]="p0"; paras[1]="p"; paras[2]="q"; paras[3]="ws"; }
else if(!M2a) { paras[0]="p0"; paras[1]="p1"; paras[2]="w2"; }
else { paras[0]="p0"; paras[1]="p1"; paras[2]="w0"; paras[3]="w2"; }
ngrid=n1d*n1d*n1d*(dim==4?n1d:1);
if(com.NSsites==8) com.ncatG = n1d+n1d; /* w from beta & ws */
else com.ncatG = (M2a ? n1d+1+n1d : 2+n1d); /* w0, w1=1, w2 */
if((meanw=(double*)malloc(com.npatt*(2+nclassM)*sizeof(double)))==NULL)
error2("oom meanw");
varw=meanw+com.npatt; postSite=varw+com.npatt;
ternary=(com.NSsites==2 && ternary);
if(ternary && (postp0p1=(double*)malloc(n1d*n1d*sizeof(double)))==NULL)
error2("oom postp0p1");
if((lnfXs=(double*)malloc(ngrid*sizeof(double)))==NULL)
error2("oom lnfXs");
if((pclassM=(double*)malloc(ngrid*nclassM*(sizeof(double)+sizeof(int))))==NULL)
error2("oom pclassM"); /* this wastes space */
iw = (int*)(pclassM+ngrid*nclassM);
if((lnprior=(double*)malloc(n1d*n1d*sizeof(double)))==NULL)
error2("oom lnprior"); /* this wastes space */
k=com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,k))==NULL) error2("oom fhK");
for(j=0; j<n1d*n1d; j++) lnprior[j]=0;
if(com.NSsites==8 && trianglePriorM8) {
/* for(j=0; j<n1d; j++) lnprior[j]=(2-1./n1d-j*2./n1d)/n1d; */
for(j=0; j<n1d; j++) lnprior[j]=(2*j+1.)/(n1d*n1d);
printf("triangular prior for p0 under M8\n");
for(j=0; j<n1d; j++) printf("%9.4f", (2*j+1.)/(2*n1d)); FPN(F0);
for(j=0; j<n1d; j++) printf("%9.4f", lnprior[j]); FPN(F0);
}
BayesEB=1;
get_grid_para_like_M2M8(para, n1d, dim, M2a, ternary, p0b, p1b, w0b, wsb, p_beta_b, q_beta_b, x, &S1);
/* Set up im and pclassM, for each igrid and iclassM. */
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1) continue;
for(k=0; k<nclassM; k++) {
get_pclassM_iw_M2M8(&iw[igrid*nclassM+k], &pclassM[igrid*nclassM+k],k,ip,para,n1d,M2a,ternary);
}
}
/* calculate log{fX}, where fX is the marginal probability of data,
and posterior of parameters postpara[]. S2 is the scale factor. */
printf("Calculating f(X), the marginal probability of data.\n");
fX=1; S2=-1e300;
FOR(j,dim) FOR(k,n1d) postpara[j][k]=1;
if(ternary) FOR(k,n1d*n1d) postp0p1[k]=1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1)
continue;
for(h=0,lnfXs[igrid]=0; h<com.npatt; h++) {
for(k=0,fh=0; k<nclassM; k++)
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
if(fh<1e-300) {
printf("strange: f[%3d] = %12.6g very small.\n",h,fh);
continue;
}
lnfXs[igrid] += log(fh)*com.fpatt[h];
}
lnfXs[igrid] += (com.NSsites==8 ? lnprior[ip[0]] : lnprior[ip[0]*n1d+ip[1]]);
t=lnfXs[igrid]-S2;
if(t>0) { /* change scale factor S2 */
t = (t<200 ? exp(-t) : 0);
fX=fX*t+1;
FOR(j,dim) FOR(k,n1d)
postpara[j][k] *= t;
FOR(j,dim)
postpara[j][ip[j]] ++;
if(ternary) {
FOR(k,n1d*n1d) postp0p1[k] *= t;
postp0p1[ip[0]*n1d+ip[1]] ++;
}
S2 = lnfXs[igrid];
}
else if(t>-200) {
t = exp(t);
fX += t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] += t;
if(ternary) postp0p1[ip[0]*n1d+ip[1]] += t;
}
}
for(j=0; j<dim; j++)
for(k=0; k<n1d; k++)
postpara[j][k]/=fX;
if(ternary)
for(k=0; k<n1d*n1d; k++)
postp0p1[k] /=fX;
fX = log(fX)+S2;
printf("\tlog(fX) = %12.6f S = %12.6f %12.6f\n", fX+S1-dim*log(n1d*1.),S1,S2);
/* calculate posterior probabilities and mean w for each site pattern.
S1 and S2 are scale factors for probabilities and for w. */
printf("Calculating f(w|X), posterior probabilities of site classes.\n");
for(h=0; h<com.npatt; h++) {
S1=-1e300; FOR(j,nclassM) postSite[j*com.npatt+h]=1;
S2=-1e300; meanw[h]=varw[h]=1;
for(iclassM=0; iclassM<nclassM; iclassM++) {
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
if(com.NSsites==2 && !ternary && para[0][ip[0]]+para[1][ip[1]]>1)
continue;
for(k=0,fh=0; k<nclassM; k++) /* duplicated calculation */
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
it = igrid*nclassM+iclassM;
fh1site = pclassM[it]*com.fhK[iw[it]*com.npatt+h];
if(fh1site<1e-300) continue;
fh1site /= fh;
t = log(fh1site)+lnfXs[igrid]; /* t is log of term on grid */
if(t>S1) { /* change scale factor S1 */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] = postSite[j*com.npatt+h]*exp(S1-t);
S1 = t;
}
postSite[iclassM*com.npatt+h] += exp(t-S1);
t = fh1site*com.rK[iw[it]];
v = fh1site*square(com.rK[iw[it]]);
if(t<1e-300) continue;
t = log(t)+lnfXs[igrid]; /* t is log of mean */
v = log(v)+lnfXs[igrid];
if(t>S2) { /* change scale factor S2 */
meanw[h] = meanw[h]*exp(S2-t);
varw[h] = varw[h]*exp(S2-t);
S2 = t;
}
meanw[h] += exp(t-S2);
varw[h] += exp(v-S2);
}
}
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-fX);
meanw[h] *= exp(S2-fX);
varw[h] *= exp(S2-fX);
varw[h] -= meanw[h]*meanw[h];
varw[h] = (varw[h]>0?sqrt(varw[h]):0);
if((h+1)%10==0 || h==com.npatt-1)
printf("\r\tdid %3d / %3d patterns %s", h+1,com.npatt,printtime(timestr));
} /* for(h) */
/* print out posterior probabilities */
fprintf(frst,"\nBayes Empirical Bayes (BEB) probabilities for %d classes (class)", nclassM);
fprintf(fout,"\nBayes Empirical Bayes (BEB) analysis");
fprintf(fout," (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)");
com.ncatG = ncatG0;
PrintProbNSsites(frst, postSite, meanw, varw, nclassM, refsp);
fprintf(fout, "\n\nThe grid %s\n\n", (ternary?"(see ternary graph for p0-p1)":""));
for(j=(ternary?2:0); j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", para[j][k]);
}
if(ternary) for(k=0; k<n1d; k++) postpara[0][k]=postpara[1][k]=-1;
fprintf(fout, "\n\nPosterior on the grid\n\n");
for(j=(ternary?2:0); j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0;k<n1d;k++)
fprintf(fout, " %6.3f", postpara[j][k]);
}
if(ternary) {
fprintf(fout,"\nPosterior for p0-p1 (see the ternary graph)\n\n");
for(k=0;k<n1d*n1d;k++) {
fprintf(fout," %5.3f", postp0p1[k]);
if(fabs(square((int)sqrt(k+1.))-(k+1))<1e-5) FPN(fout);
}
fprintf(fout,"\nsum of density on p0-p1 = %10.6f\n", sum(postp0p1,n1d*n1d));
}
BayesEB = 0;
free(meanw); free(lnfXs); free(pclassM); free(lnprior);
if(ternary) free(postp0p1);
return(0);
}
/********************************************************************/
void get_grid_para_like_AC(double para[][100], int n1d, int dim,
double w0b[], double w2b[], double x[], double *S);
void get_pclassM_iw_AC(int *iw, double *pclassM, int iclassM, int ip[], double para[][100], int n1d);
void get_grid_para_like_AC(double para[][100], int n1d, int dim,
double w0b[], double w2b[], double x[], double *S)
{
/* This sets up the grid (para[][]) according to the priors.
It calculates the probability of data at each site given w: f(f_h|w).
This is calculated using the branch model (NSsites = 0 model = 2), with
BayesEB=2 used to force the use of the correct scale factors in GetPMatBranch().
Order of site classes for iw or f(x_h|w):
fore back #sets
Branchsite A (121 sets)
site class 0: w0 w0 10
site class 1: w1=1 w1=1 1
site class 2a: w0 w2 100
site class 2b: w1=1 w2 10
Clade C (111 sets)
site class 0: w0 w0 10
site class 1: w1=1 w1=1 1
site class 2: w2 w3 10*10*10...
*/
int modelA=(com.model==2), i,k,h, iw, site=10;
double fh, wbranches[NBTYPE]; /* w for back and fore branches */
int NSsites0=com.NSsites, model0=com.model;
for(i=0; i<n1d; i++) {
para[0][i] = para[1][i] = -1; /* p0 & p1 */
para[2][i] = w0b[0] + (i+0.5)*(w0b[1]-w0b[0])/n1d; /* w0 */
para[3][i] = w2b[0] + (i+0.5)*(w2b[1]-w2b[0])/n1d; /* w2 */
if(com.model==3) /* w3 w4 ... in model C */
for(k=1; k<com.nbtype; k++)
para[3+k][i] = para[3][i];
}
/* calculates the likelihood com.fhK[] */
printf("\nCalculating f(x_h|w) for %d w's\n", com.ncatG);
com.conPSiteClass = 0;
*S = 0;
com.model = 2;
com.NSsites = 0;
com.pomega = wbranches;
for(iw=0; iw<com.ncatG; iw++) {
if(modelA) { /* model A: 10 + 1 + 100 + 10 */
if(iw<n1d) wbranches[0] = wbranches[1] = para[2][iw]; /* class 0: w0 */
else if(iw==n1d) wbranches[0] = wbranches[1] = 1; /* class 1: w1 */
else if(iw<n1d+1+n1d*n1d) { /* class 2a: w0 w2 */
wbranches[0] = para[2][(iw-n1d-1)/n1d];
wbranches[1] = para[3][(iw-n1d-1)%n1d];
}
else { /* class 2b: w1 w2 */
wbranches[0] = 1;
wbranches[1] = para[3][iw-n1d-1-n1d*n1d];
}
}
else { /* model C: 10 + 1 + 10*10*... */
if(iw<n1d) /* class 0: w0 */
for(i=0; i<com.nbtype; i++) wbranches[i] = para[2][iw];
else if(iw==n1d) /* class 1: w1 */
for(i=0; i<com.nbtype; i++) wbranches[i] = 1;
else { /* class 2: w2 w3 */
for(i=com.nbtype-1,k=iw-n1d-1; i>=0; i--) {
wbranches[i] = para[3+i][k%n1d];
k /= n1d;
}
}
/*
printf("\nw%-2d: ", iw+1);
for(i=0; i<com.nbtype; i++) printf(" %10.6f", wbranches[i]);
*/
}
ConditionalPNode(tree.root, 0, x);
for(h=0; h<com.npatt; h++) {
for(i=0,fh=0; i<com.ncode; i++)
fh += com.pi[i]*nodes[tree.root].conP[h*com.ncode+i];
if(fh<=0) {
if(fh<-1e-5) printf("\nfh = %.6f negative\n",fh);
fh=1e-80;
}
fh = log(fh);
for(k=0; k<com.NnodeScale; k++)
fh += com.nodeScaleF[k*com.npatt+h];
com.fhK[iw*com.npatt+h] = fh;
}
if((iw+1)%10==0 || iw==com.ncatG-1)
printf("\r\t %4d / %d sets.", iw+1, com.ncatG);
}
FPN(F0);
for(h=0,*S=0; h<com.npatt; h++) {
for(k=1,fh=com.fhK[h]; k<com.ncatG; k++)
fh = max2(fh,com.fhK[k*com.npatt+h]);
for(k=0; k<com.ncatG; k++)
com.fhK[k*com.npatt+h] = exp(com.fhK[k*com.npatt+h]-fh);
*S += fh*com.fpatt[h];
}
com.NSsites=NSsites0; com.model=model0;
}
void get_pclassM_iw_AC(int *iw, double *pclassM, int iclassM, int ip[], double para[][100], int n1d)
{
/* Given the point on the grid ip[] and iclassM, this returns iw and pclassM,
where iw locates the correct f(x_h|w) stored in com.fhK[], and pclassM is
the proportion of the site class under the model.
The n1d*n1d grid for p0-p1 is mapped onto the ternary graph for p0-p1-p2.
See get_grid_para_like_AC() for order of iw or site classes.
Parameters are model A: (p0 p1 w0 w2)
model C: (p0 p1 w0 w2 w3 ...)
*/
int modelA=(com.model==2), i,j;
double p[3];
GetIndexTernary(&i, &j, &p[0], &p[1], ip[0]*n1d+ip[1], n1d);
p[2] = 1-p[0]-p[1];
*pclassM = p[iclassM<=2 ? iclassM : 2];
if(modelA && iclassM>=2) *pclassM = p[2]*p[iclassM-2]/(1-p[2]);
if(iclassM==0) *iw = ip[2]; /* class 0: w0 */
else if(iclassM==1) *iw = n1d; /* class 1: w1 */
else if(modelA==0) { /* clade model C site class 2: w2 w3 w4 ... */
for(i=0,*iw=0; i<com.nbtype; i++)
*iw = *iw*n1d + ip[3+i];
*iw += n1d+1;
}
else if(iclassM==2) *iw = n1d+1+ip[2]*n1d+ip[3]; /* class 2a model A: w0 & w2 */
else *iw = n1d+1+n1d*n1d+ip[3]; /* class 2b model A: w1 & w2 */
}
int lfunNSsites_AC (FILE* frst, double x[], int np)
{
/* Bayes empirical Bayes (BEB) calculation of posterior probabilities for site
classes under the branch-site model A (Yang & Nielsen 2002) and clade model C
(Bielawski & Yang 2004). The dimension of integral is 4 for A and (3+nbtype)
for C. Each dimension is approximated using n1d=10 categories, and the grid
is made up of ngrid=n1d^dim points.
For branch-site model A, the probability of data at a site f(x_h|w) needs to
be calculated for 121=(d+1+d*d+d) sets of w's. For model C, it needs to be
calculated for 111 (d+1+d^nbtype) sets.
Those are calculated and stored in com.fhK[], before entering the grid.
iw[ngrid*nclassM] identifies the right f(x_h|w), and pclassM[ngrid*nclassM]
is the proportion of sites under the model, f(w|ita). Those are set up in
get_pclassM_iw_AC().
The priors are set up in get_grid_para_like_AC(). See notes there.
Parameters and priors are as follows:
model A (p0 p1 w0 w2): p0,p1~U(0,1), w0~U(0,1), w2~U(1,11)
model C (p0 p1 w0 w2 w3): p0,p1~U(0,1), w0~U(0,1), w2,w3~U(0,5)
Ziheng, UCL, 22 September 2004, modified Nov 2008 to use more than 2 branch types
under clade model C.
*/
int n1d=10, debug=0, site=10;
double w0b[]={0,1}; /* w0b for w0. */
double wsb[]={1,11}; /* for w2 in model A */
double w2b[]={0,3}; /* for w2-w3-w4 in model C */
int modelA=(com.model==2), dim=(modelA?4:3+com.nbtype), ngrid,igrid, ip[3+NBTYPE], j,k,h,hp,it;
int refsp=0, ncatG0=com.ncatG, lst=(com.readpattern?com.npatt:com.ls);
/* # of site classes under model and index for site class */
int nclassM = (modelA?4:3), iclassM, *iw, i;
double para[3+NBTYPE][100]={{0}}, postpara[3+NBTYPE][100]; /* paras on grid : n1d<=100! */
double fh, fX, *lnfXs,S1,S2, *pclassM, *postSite, *postp0p1;
double fhk[3], t, cutoff=0.5;
char timestr[32], paras[3+NBTYPE][5]={"p0","p1","w0","w2","w3"}, *sig, aa;
printf("\nBEBing (dim = %d). This may take many minutes.", dim);
if(!modelA)
for(i=2; i<com.nbtype; i++) sprintf(paras[3+i], "w%d", i+2);
for(i=0,ngrid=1; i<dim; i++) ngrid *= n1d;
if(modelA)
com.ncatG = n1d + 1 + n1d*n1d + n1d; /* branch-site model A: table 1 YWN05 */
else { /* clade model C: table 2 YWN05 */
for(i=0,com.ncatG=1; i<com.nbtype; i++) com.ncatG *= n1d; /* w2 w3 w4 ... */
com.ncatG += n1d + 1; /* w0 & w1=1 */
}
k = (n1d*n1d + com.npatt*nclassM + ngrid + ngrid*nclassM)*sizeof(double)
+ ngrid*nclassM*sizeof(int);
if(noisy) printf("\nTrying to get %dM memory in lfunNSsites_A\n", k);
if((postp0p1=(double*)malloc(k)) == NULL)
error2("oom in lfunNSsites_AC");
postSite = postp0p1 + n1d*n1d;
lnfXs = postSite + com.npatt*nclassM;
pclassM = lnfXs + ngrid;
iw = (int*)(pclassM + ngrid*nclassM);
k = com.npatt*com.ncatG*sizeof(double);
if((com.fhK=(double*)realloc(com.fhK,k)) == NULL) error2("oom fhK");
BayesEB = 2;
get_grid_para_like_AC(para, n1d, dim, w0b, (modelA?wsb:w2b), x, &S1);
/* Set up im and pclassM, for each igrid and iclassM. */
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
for(k=0; k<nclassM; k++) {
get_pclassM_iw_AC(&iw[igrid*nclassM+k], &pclassM[igrid*nclassM+k],k,ip,para,n1d);
}
}
/* calculate marginal prob of data, fX, and postpara[]. S2 is scale. */
printf("Calculating f(X), the marginal probability of data.\n");
fX=1; S2=-1e300;
for(j=0; j<dim; j++) /* postpara[0-1] for p0p1 ignored */
for(k=0; k<n1d; k++)
postpara[j][k] = 1;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] = 1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) {
ip[j]=it%n1d;
it/=n1d;
}
for(h=0,lnfXs[igrid]=0; h<com.npatt; h++) {
for(k=0,fh=0; k<nclassM; k++)
fh += pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
if(fh<1e-300) {
printf("strange: f[%3d] = %12.6g very small.\n",h,fh);
continue;
}
lnfXs[igrid] += log(fh)*com.fpatt[h];
}
t = lnfXs[igrid]-S2;
if(t>0) { /* change scale factor S2 */
t = (t<200 ? exp(-t) : 0);
fX = fX*t+1;
for(j=0; j<dim; j++) for(k=0; k<n1d; k++)
postpara[j][k] *= t;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] *= t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] ++;
postp0p1[ip[0]*n1d+ip[1]] ++;
S2 = lnfXs[igrid];
}
else if(t>-200) {
t = exp(t);
fX += t;
for(j=0; j<dim; j++)
postpara[j][ip[j]] += t;
postp0p1[ip[0]*n1d+ip[1]] += t;
}
if((igrid+1)%500==0 || igrid==ngrid-1)
printf("\t%3d / %3d grid points\r", igrid+1,ngrid);
}
for(j=0; j<dim; j++) for(k=0; k<n1d; k++)
postpara[j][k] /= fX;
for(k=0; k<n1d*n1d; k++)
postp0p1[k] /=fX;
fX = log(fX)+S2;
printf("\tlog(fX) = %12.6f S = %12.6f %12.6f\n", fX+S1-dim*log(n1d*1.),S1,S2);
/* calculate posterior probabilities for sites. S1 is scale factor */
printf("Calculating f(w|X), posterior probs of site classes.\n");
for(h=0; h<com.npatt; h++) {
S1 = -1e300;
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] = 1;
for(igrid=0; igrid<ngrid; igrid++) {
for(j=dim-1,it=igrid; j>=0; j--) { ip[j]=it%n1d; it/=n1d; }
for(k=0,fh=0; k<nclassM; k++) /* duplicated calculation */
fh += fhk[k] = pclassM[igrid*nclassM+k]*com.fhK[iw[igrid*nclassM+k]*com.npatt+h];
for(iclassM=0; iclassM<nclassM; iclassM++) {
fhk[iclassM] /= fh;
t = log(fhk[iclassM]) + lnfXs[igrid]; /* t is log of term on grid */
if(t>S1 + 50) { /* change scale factor S1 */
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-t);
S1 = t;
}
postSite[iclassM*com.npatt+h] += exp(t-S1);
}
}
for(j=0; j<nclassM; j++)
postSite[j*com.npatt+h] *= exp(S1-fX);
if((h+1)%10==0 || h==com.npatt-1)
printf("\r\tdid %3d / %3d site patterns %s", h+1,com.npatt,printtime(timestr));
} /* for(h) */
if(debug)
for(k=0,printf("\nS%d: ",site); k<nclassM; k++) printf("%7.4f",postSite[k*com.npatt+site]);
/* print out posterior probabilities */
fprintf(frst,"\nBayes Empirical Bayes (BEB) probabilities for %d classes (class)", nclassM);
fprintf(fout,"\nBayes Empirical Bayes (BEB) analysis");
fprintf(fout," (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 22:1107-1118)");
com.ncatG = ncatG0;
PrintProbNSsites(frst, postSite, NULL, NULL, nclassM, refsp);
if(com.model==2) { /* branch&site model A */
fprintf(fout,"\nPositive sites for foreground lineages Prob(w>1):\n");
for(h=0; h<lst; h++) {
hp = (!com.readpattern ? com.pose[h] : h);
aa = GetAASiteSpecies(refsp, hp);
t = postSite[2*com.npatt+hp] + postSite[3*com.npatt+hp];
if(t>cutoff) {
sig=""; if(t>.95) sig="*"; if(t>.99) sig="**";
fprintf(fout,"%6d %c %.3f%s\n",h+1, aa, t, sig);
}
}
}
fprintf(fout, "\n\nThe grid (see ternary graph for p0-p1)\n\n");
for(j=2; j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", para[j][k]);
}
for(k=0; k<n1d; k++)
postpara[0][k] = postpara[1][k]=-1;
fprintf(fout, "\n\nPosterior on the grid\n\n");
for(j=2; j<dim; j++,FPN(fout)) {
fprintf(fout, "%-2s: ", paras[j]);
for(k=0; k<n1d; k++)
fprintf(fout, " %6.3f", postpara[j][k]);
}
fprintf(fout,"\nPosterior for p0-p1 (see the ternary graph)\n\n");
for(k=0; k<n1d*n1d; k++) {
fprintf(fout," %5.3f", postp0p1[k]);
if(fabs(square((int)sqrt(k+1.))-(k+1))<1e-5) FPN(fout);
}
fprintf(fout,"\nsum of density on p0-p1 = %10.6f\n", sum(postp0p1,n1d*n1d));
free(postp0p1);
BayesEB = 0;
return(0);
}
|
b7a71897988374450f6b582d81605eda5e493ac1.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation of the int8 implicit-GEMM convolution wrapper for one
// cutlass tile configuration (SIMT class, SM61 target).
// Src/dst tensors use the channel-interleaved NCxHWx<4> layout; the filter
// uses CxRSKx<4>.
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tiling: 32x128x32 per threadblock, 32x64x32 per warp, 1x1x4 per instruction.
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: bias add + linear combination, clamped (saturated) to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
// Force code generation for this configuration so the runtime dispatcher can
// link against it without re-instantiating the cutlass templates per call site.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
b7a71897988374450f6b582d81605eda5e493ac1.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation of the int8 implicit-GEMM convolution wrapper for one
// cutlass tile configuration (SIMT class, SM61 target).
// Src/dst tensors use the channel-interleaved NCxHWx<4> layout; the filter
// uses CxRSKx<4>.
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tiling: 32x128x32 per threadblock, 32x64x32 per warp, 1x1x4 per instruction.
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: bias add + linear combination, clamped (saturated) to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
// Force code generation for this configuration so the runtime dispatcher can
// link against it without re-instantiating the cutlass templates per call site.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
77706865c4c32b41b39c695199760f7181671fd1.hip | // !!! This is a file automatically generated by hipify!!!
#include <cnpy.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <rocblas.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_cooperative_groups.h>
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#define BATCH_SIZE 1
#define FILTER_X 3
#define FILTER_Y 3
#define WITH_RELU 0
//#define IC 32 //128
//#define OC 32 //128
//#define IMAGE_DIM 14 //28
#define A_dim OC
#define B_dim IC * FILTER_X * FILTER_Y
#ifndef HALF
#define HALF 0
#endif
#if HALF
#define C_dim (IMAGE_DIM * IMAGE_DIM / 2)
#else
#define C_dim IMAGE_DIM * IMAGE_DIM
#endif
//#define Tsx 3
#define Tsy 1
#define Tsz (C_dim / C_Blocks)
#define Fx 1
#define Fy (Tsz/Fx)
#define Usy (Tsy * Fy) // this better be a multiple of 2, ideally 32
#define Gsy (Usy)
//#define Gx 36
#define Block_size (Gy * Gsy)
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process when a driver-API call fails. The macro above captures the
// call site (__FILE__/__LINE__); the status is printed as a raw numeric code.
// NOTE(review): the message has no trailing '\n', so consecutive diagnostics
// run together on one line.
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr,
"CUDA Driver API error = %04d from file <%s>, line %i.",
err, file, line );
exit(-1);
}
}
hipDevice_t device;
hipCtx_t context;
hipModule_t module;
hipFunction_t function;
size_t totalGlobalMem;
char *module_file = (char*) "testing_conv.cubin";
#if RESIDUAL
char *kernel_name = (char*) "_Z2mmPKfS0_Pf";
#else
char *kernel_name = (char*) "_Z2mmPKfPf";
#endif
// --- functions -----------------------------------------------------------
// Initialize the driver API: pick device 0, create a context, load the
// precompiled cubin (module_file) and resolve the mangled kernel symbol
// (kernel_name) into the file-scope globals device/context/module/function.
// Exits the process on any failure.
// NOTE(review): hipDeviceComputeCapability and hipCtxDetach are deprecated
// driver entry points; kept as-is to preserve behavior.
void initCUDA()
{
int deviceCount = 0;
hipError_t err = hipInit(0);
int major = 0, minor = 0;
if (err == hipSuccess)
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "Error: no devices supporting CUDA");
exit(-1);
}
// get first CUDA device
checkCudaErrors(hipDeviceGet(&device, 0));
char name[100];
hipDeviceGetName(name, 100, device);
printf("> Using device 0: %s", name);
// get compute capabilities and the devicename
checkCudaErrors( hipDeviceComputeCapability(&major, &minor, device) );
printf("> GPU Device has SM %d.%d compute capability", major, minor);
err = hipCtxCreate(&context, 0, device);
if (err != hipSuccess) {
fprintf(stderr, "* Error initializing the CUDA context.");
hipCtxDetach(context);
exit(-1);
}
err = hipModuleLoad(&module, module_file);
std::cout << err << std::endl;
if (err != hipSuccess) {
fprintf(stderr, "* Error loading the module %s", module_file);
hipCtxDetach(context);
exit(-1);
}
err = hipModuleGetFunction(&function, module, kernel_name);
if (err != hipSuccess) {
fprintf(stderr, "* Error getting kernel function %s", kernel_name);
hipCtxDetach(context);
exit(-1);
}
}
// Launch the cubin-loaded kernel (resolved in initCUDA) 100 times on a
// grid of A_Blocks x C_Blocks blocks of Block_size threads, and print the
// average per-launch wall time measured with driver events.
// d_residual is only passed to the kernel when compiled with RESIDUAL.
// NOTE(review): the 100-iteration count is hard-coded and must match the
// divisor in the printout below.
void runKernel(hipDeviceptr_t d_BC, hipDeviceptr_t d_residual, hipDeviceptr_t d_AC)
{
#if RESIDUAL
void *args[3] = { &d_BC, &d_residual, &d_AC};
#else
void *args[2] = { &d_BC, &d_AC};
#endif
// grid for kernel: <<<N, 1>>>
hipEvent_t start, stop;
hipEventCreate(&start,0);
hipEventCreate(&stop,0);
hipEventRecord(start,0);
std::cout << A_Blocks << " " << C_Blocks << " " << Block_size << std::endl;
for(int i = 0;i < 100; i ++){
checkCudaErrors( hipModuleLaunchKernel(function, A_Blocks, C_Blocks, 1, // Nx1x1 blocks
Block_size, 1, 1, // 1x1x1 threads
0, 0, args, 0) );
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
std::cout << "kernel used " << time / 100.0 << std::endl;
}
// Convert n single-precision values in `in` to half precision in `out`.
void float2half(float * in, __half * out, int n) {
    int idx = 0;
    while (idx < n) {
        out[idx] = __float2half(in[idx]);
        ++idx;
    }
}
// Widen n half-precision values in `in` back to single precision in `out`.
void half2float(__half * in, float * out, int n) {
    int idx = 0;
    while (idx < n) {
        out[idx] = __half2float(in[idx]);
        ++idx;
    }
}
// Sum of absolute element-wise differences between t1 and t2 (L1 distance).
// Returns 0 for size <= 0.
float get_diff(float * t1, float * t2, int size)
{
    float diff = 0.0f;
    for (int i = 0; i < size; i ++) {
        // fabsf: the original used unqualified abs(), which may bind to the
        // integer ::abs overload and silently truncate fractional differences.
        diff += fabsf(t1[i] - t2[i]);
        // if(t1[i] - t2[i] > 3){
        //     std::cout << i << std::endl;
        // }
    }
    return diff;
}
// Transpose a row-major dim1 x dim2 matrix `input` into `output`,
// which becomes dim2 x dim1 (also row-major).
void transpose(float * input, float * output, int dim1, int dim2) {
    for (int row = 0; row < dim1; row++) {
        for (int col = 0; col < dim2; col++) {
            output[col * dim1 + row] = input[row * dim2 + col];
        }
    }
}
// Fill data[0..n) with pseudo-random integers in [0, 9] stored as floats.
// Uses the C library rand(); call srand() first for reproducibility.
void fillvector(float *data, int n) {
    int k = 0;
    while (k < n) {
        data[k] = float(rand() % 10);
        ++k;
    }
}
// filter shape in OC, K, K, IC
// input shape in H, W, IC
// output shape in H, W, OC
// each block is responsible for a number of output channels
#define CPB 2
#define WIDTH (IC / 2) // aka block size
// Reduce `val` across the calling warp (shuffle tree) and have lane 0 store
// the warp's sum into array[index].
// NOTE(review): despite the name, the final store is a plain overwrite, NOT an
// atomicAdd -- safe only while each warp writes a distinct index, as the
// callers in this file do via `threadIdx.x / 32`.
// Assumes all 32 lanes of the warp participate (full-warp mask).
__device__ inline void atomicAddWarpToArray(__half2 *array, int index, __half2 val)
{
    const unsigned full_mask = 0xffffffffu;  // explicit mask instead of -1
    val += __shfl_down_sync(full_mask, val, 16);
    val += __shfl_down_sync(full_mask, val, 8);
    val += __shfl_down_sync(full_mask, val, 4);
    val += __shfl_down_sync(full_mask, val, 2);
    val += __shfl_down_sync(full_mask, val, 1);
    if(threadIdx.x % 32 == 0) {
        array[index] = val;  // lane 0 publishes the warp partial
    }
}
// Reduce `val` across the calling warp with a shuffle tree; the total is valid
// in lane 0 (other lanes hold partial sums).
// Assumes all 32 lanes participate (full-warp mask).
__device__ inline __half2 atomicAddWarp(__half2 val)
{
    const unsigned full_mask = 0xffffffffu;  // explicit mask instead of -1
    val += __shfl_down_sync(full_mask, val, 16);
    val += __shfl_down_sync(full_mask, val, 8);
    val += __shfl_down_sync(full_mask, val, 4);
    val += __shfl_down_sync(full_mask, val, 2);
    val += __shfl_down_sync(full_mask, val, 1);
    return val;
}
// Direct 3x3 zero-padded ("same") convolution in half precision.
// Each block computes CPB consecutive output channels starting at
// blockIdx.x * CPB; each thread owns one __half2 lane, i.e. two input
// channels (the commented-out host launch is direct<<<OC / CPB, WIDTH>>>,
// WIDTH = IC / 2).
// Presumed layouts (from the host code in this file -- TODO confirm):
//   input  : HW-major with channels innermost (transposed_input),
//   filter : [OC][FILTER_X][FILTER_Y][IC]    (filter_KFFC),
//   output : HW-major with OC innermost.
// NOTE(review): assumes blockDim.x == WIDTH, WIDTH a multiple of 32, and
// WIDTH / 32 <= 32 so reduction_buffer holds one partial per warp.
__global__ void direct(__half2* input, __half2 * filter, __half * output)
{
int my_oc_start = blockIdx.x * CPB;
// Per-thread register copy of the 3x3 filter taps for each of the block's
// CPB output channels (one __half2 = two input channels per thread).
register __half2 my_kernel[CPB][FILTER_X][FILTER_Y];
// One slot per warp for the two-stage cross-warp reduction.
__shared__ __half2 reduction_buffer[32];
//__shared__ __half2 coalesce_buffer[]
if(threadIdx.x < 32)
{
reduction_buffer[threadIdx.x] = __float2half2_rn(0.0f);
}
__syncthreads();
// Stage this block's filter taps into registers.
for(int oc = 0; oc < CPB; oc ++)
{
int filter_offset = my_oc_start * WIDTH * FILTER_X * FILTER_Y + oc * WIDTH * FILTER_X * FILTER_Y;
my_kernel[oc][0][0] = filter[filter_offset + threadIdx.x];
my_kernel[oc][0][1] = filter[filter_offset + WIDTH + threadIdx.x];
my_kernel[oc][0][2] = filter[filter_offset + WIDTH * 2 + threadIdx.x];
my_kernel[oc][1][0] = filter[filter_offset + WIDTH * 3 + threadIdx.x];
my_kernel[oc][1][1] = filter[filter_offset + WIDTH * 4 + threadIdx.x];
my_kernel[oc][1][2] = filter[filter_offset + WIDTH * 5 + threadIdx.x];
my_kernel[oc][2][0] = filter[filter_offset + WIDTH * 6 + threadIdx.x];
my_kernel[oc][2][1] = filter[filter_offset + WIDTH * 7 + threadIdx.x];
my_kernel[oc][2][2] = filter[filter_offset + WIDTH * 8 + threadIdx.x];
}
__half2 intermediate[CPB];
// Row 0: the row above the image is zero padding, so only filter rows 1-2
// contribute. A 3-column window is kept in registers and shifted left each
// iteration; columns -1 and IMAGE_DIM are zero padding.
__half2 input_10 = __float2half2_rn(0.0f);
__half2 input_11 = __float2half2_rn(0.0f);
__half2 input_12 = input[threadIdx.x];
__half2 input_20 = __float2half2_rn(0.0f);
__half2 input_21 = __float2half2_rn(0.0f);
__half2 input_22 = input[IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(col + 1) * WIDTH + threadIdx.x];
input_20 = input_21;
input_21 = input_22;
input_22 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
// Per-thread partial dot product over this thread's two input channels.
intermediate[oc] = __hmul2(input_10,my_kernel[oc][1][0]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_20 , my_kernel[oc][2][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_21 , my_kernel[oc][2][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_22 , my_kernel[oc][2][2],intermediate[oc]);
// Stage 1: warp-level reduce; stage 2: first warp reduces the partials.
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__syncthreads();
// NOTE(review): no barrier between reading reduction_buffer below and the
// next iteration's store into it; a second __syncthreads() after the read
// would rule out a write-after-read race.
__half2 result;
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
// Collapse the __half2 lane pair into the final scalar output value.
output[col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
// Interior rows: all three filter rows contribute.
for(int row = 1; row < IMAGE_DIM - 1; row ++) {
__half2 input_00 = __float2half2_rn(0.0f);
__half2 input_01 = __float2half2_rn(0.0f);
__half2 input_02 = input[(row - 1) * IMAGE_DIM * WIDTH + threadIdx.x];
__half2 input_10 = __float2half2_rn(0.0f);
__half2 input_11 = __float2half2_rn(0.0f);
__half2 input_12 = input[(row ) * IMAGE_DIM * WIDTH + threadIdx.x];
__half2 input_20 = __float2half2_rn(0.0f);
__half2 input_21 = __float2half2_rn(0.0f);
__half2 input_22 = input[(row + 1) * IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_00 = input_01;
input_01 = input_02;
input_02 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row - 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row ) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_20 = input_21;
input_21 = input_22;
input_22 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row + 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
intermediate[oc] = __hmul2(input_00 ,my_kernel[oc][0][0]);
intermediate[oc] = __hfma2(input_01 , my_kernel[oc][0][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_02 , my_kernel[oc][0][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_10 , my_kernel[oc][1][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_20 , my_kernel[oc][2][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_21 , my_kernel[oc][2][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_22 , my_kernel[oc][2][2],intermediate[oc]);
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__syncthreads();
__half2 result;
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
output[row * IMAGE_DIM * OC + col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
}
// Last row: the row below the image is zero padding, so only filter rows 0-1
// contribute.
int row = IMAGE_DIM - 1;
__half2 input_00 = __float2half2_rn(0.0f);
__half2 input_01 = __float2half2_rn(0.0f);
__half2 input_02 = input[(row - 1) * IMAGE_DIM * WIDTH + threadIdx.x];
input_10 = __float2half2_rn(0.0f);
input_11 = __float2half2_rn(0.0f);
input_12 = input[(row ) * IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_00 = input_01;
input_01 = input_02;
input_02 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row - 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row ) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
intermediate[oc] = __hmul2(input_00 , my_kernel[oc][0][0]);
intermediate[oc] = __hfma2(input_01 , my_kernel[oc][0][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_02 , my_kernel[oc][0][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_10 , my_kernel[oc][1][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__half2 result;
__syncthreads();
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
output[row * IMAGE_DIM * OC + col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
}
// Benchmark driver: load filter/input (and optionally residual) from .npy
// files, run a cuDNN reference convolution, the (currently disabled) `direct`
// kernel, and the cubin kernel loaded via the driver API; save each output to
// .npy and print timings plus the L1 difference vs the cuDNN reference.
int main(int argc, char const *argv[]) {
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// fillvector((float*)input, IC * IMAGE_DIM * IMAGE_DIM);
// fillvector((float*)kernel, IC * OC * FILTER_Y * FILTER_X);
cnpy::NpyArray arr = cnpy::npy_load("filter.npy");
float * kernel = arr.data<float>();
// FIX: these word_size checks used `=` (assignment), making the asserts
// unconditionally-true no-ops; they now actually validate the dtype.
assert(arr.word_size == sizeof(float));
assert(arr.shape.size()==2 && arr.shape[0] == OC && arr.shape[1] == IC * FILTER_X * FILTER_Y); //reference kernel for cudnn
cnpy::NpyArray arr2 = cnpy::npy_load("filter_KFFC.npy");
float * kernel_KFFC = arr2.data<float>();
assert(arr2.word_size == sizeof(float));
assert(arr2.shape.size()==4 && arr2.shape[0] == OC && arr2.shape[1] == FILTER_X && arr2.shape[2] == FILTER_Y && arr2.shape[3] == IC); //reference kernel for cudnn
cnpy::NpyArray arr1 = cnpy::npy_load("input.npy");
float * input = arr1.data<float>();
assert(arr1.word_size == sizeof(float));
assert(arr1.shape.size()==3 && arr1.shape[0] == IC && arr1.shape[1] == IMAGE_DIM && arr1.shape[2] == IMAGE_DIM); //reference kernel for cudnn
#if RESIDUAL
cnpy::NpyArray arr3 = cnpy::npy_load("residual.npy");
float * residual = arr3.data<float>();
assert(arr3.word_size == sizeof(float));
// FIX: this shape check inspected arr1 (the input) instead of arr3 (the
// residual just loaded above).
assert(arr3.shape.size()==3 && arr3.shape[0] == OC && arr3.shape[1] == IMAGE_DIM && arr3.shape[2] == IMAGE_DIM);
__half * residual_h;
residual_h = (__half *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 2);
float2half(residual,residual_h,OC * IMAGE_DIM * IMAGE_DIM);
std::cout << residual[0] << std::endl;
#endif
// CHW -> HWC copy of the input for the hand-written direct kernel.
auto transposed_input = (float * ) malloc(IC * IMAGE_DIM * IMAGE_DIM * 4);
transpose(input, transposed_input, IC , IMAGE_DIM * IMAGE_DIM);
initCUDA();
#if HALF
__half * kernel_h, *input_h, *kernel_KFFC_h, *transposed_input_h;
kernel_h = (__half *)malloc(IC * OC * FILTER_X * FILTER_Y *2);
kernel_KFFC_h = (__half *)malloc(IC * OC * FILTER_X * FILTER_Y * 2);
input_h = (__half *)malloc(IC * IMAGE_DIM * IMAGE_DIM *2);
transposed_input_h = (__half *) malloc(IC * IMAGE_DIM * IMAGE_DIM * 2);
float2half(transposed_input, transposed_input_h, IC * IMAGE_DIM * IMAGE_DIM);
float2half(kernel, kernel_h, IC * OC * FILTER_X * FILTER_Y);
float2half(kernel_KFFC,kernel_KFFC_h, IC * OC * FILTER_X * FILTER_Y);
float2half(input, input_h, IC * IMAGE_DIM * IMAGE_DIM);
#endif
// --- cuDNN descriptors ---------------------------------------------------
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NCHW,
#if HALF
CUDNN_DATA_HALF,
#else
/*dataType=*/CUDNN_DATA_FLOAT,
#endif
/*batch_size=*/BATCH_SIZE,
/*channels=*/IC,
/*image_height=*/IMAGE_DIM,
/*image_width=*/IMAGE_DIM));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
#if HALF
CUDNN_DATA_HALF,
#else
/*dataType=*/CUDNN_DATA_FLOAT,
#endif
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/OC,
/*in_channels=*/IC,
/*kernel_height=*/FILTER_X,
/*kernel_width=*/FILTER_Y));
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NCHW,
#if HALF
CUDNN_DATA_HALF,
#else
/*dataType=*/CUDNN_DATA_FLOAT,
#endif
/*batch_size=*/BATCH_SIZE,
/*channels=*/OC,
/*image_height=*/IMAGE_DIM,
/*image_width=*/IMAGE_DIM));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
#if HALF
CUDNN_DATA_HALF
#else
/*dataType=*/CUDNN_DATA_FLOAT
#endif
));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
std::cout << "picked algorithm: " << convolution_algorithm << std::endl;
size_t workspace_bytes = 0;
//convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
// --- device buffers ------------------------------------------------------
#if HALF
int input_bytes = 1 * IC * IMAGE_DIM * IMAGE_DIM * 2;
int output_bytes = 1 * OC * IMAGE_DIM * IMAGE_DIM * 2;
int kernel_bytes = IC * OC * FILTER_Y * FILTER_X * 2;
__half * d_residual{nullptr};
std::cout << RESIDUAL << std::endl;
#if RESIDUAL
hipMalloc((void **)&d_residual,output_bytes) ;
hipMemcpy(d_residual, residual_h,output_bytes,hipMemcpyHostToDevice);
#endif
__half * d_input{nullptr};
hipMalloc((void **)&d_input, input_bytes);
hipMemcpy(d_input, input_h, input_bytes, hipMemcpyHostToDevice);
__half * d_output{nullptr};
hipMalloc(&d_output, output_bytes);
hipMemset(d_output, 0, output_bytes);
__half * d_transposed_input{nullptr};
hipMalloc((void **)&d_transposed_input, input_bytes);
hipMemcpy(d_transposed_input, transposed_input_h, input_bytes, hipMemcpyHostToDevice);
__half * d_kernel{nullptr};
hipMalloc(&d_kernel, kernel_bytes);
hipMemcpy(d_kernel, kernel_h, kernel_bytes, hipMemcpyHostToDevice);
__half * d_kernel_KFFC{nullptr};
hipMalloc(&d_kernel_KFFC, kernel_bytes);
hipMemcpy(d_kernel_KFFC, kernel_KFFC_h, kernel_bytes, hipMemcpyHostToDevice);
#else
int input_bytes = 1 * IC * IMAGE_DIM * IMAGE_DIM * sizeof(float);
int output_bytes = 1 * OC * IMAGE_DIM * IMAGE_DIM * sizeof(float);
int kernel_bytes = IC * OC * FILTER_Y * FILTER_X * sizeof(float);
float* d_input{nullptr};
hipMalloc((void **)&d_input, input_bytes);
hipMemcpy(d_input, input, input_bytes, hipMemcpyHostToDevice);
float* d_output{nullptr};
hipMalloc(&d_output, output_bytes);
hipMemset(d_output, 0, output_bytes);
float* d_kernel{nullptr};
hipMalloc(&d_kernel, kernel_bytes);
hipMemcpy(d_kernel, kernel, kernel_bytes, hipMemcpyHostToDevice);
float * d_residual{nullptr};
#endif
void* d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
const float alpha = 1, beta = 0;
hipDeviceSynchronize();
// Warm-up launches so the timed loop below measures steady-state cost.
for(int i = 0; i < 10; i ++) {
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
}
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for(int i = 0; i < 10; i ++) {
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time,start,stop);
std::cout << "baseline used " << time / 10 << std::endl;
hipDeviceSynchronize();
// Copy back and save the cuDNN reference output.
float* h_output = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
__half * h_output_h = (__half *)malloc(output_bytes);
hipMemcpy(h_output_h, d_output, output_bytes, hipMemcpyDeviceToHost);
half2float(h_output_h,h_output,output_bytes / 2);
#else
hipMemcpy(h_output, d_output, output_bytes, hipMemcpyDeviceToHost);
#endif
hipDeviceSynchronize();
std::cout << h_output[0] << std::endl;
cnpy::npy_save("cudnn_output.npy",&h_output[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
hipFree(d_output);
hipMalloc(&d_output, output_bytes);
hipMemset(d_output, 0, output_bytes);
hipProfilerStart();
hipEventRecord(start);
// for(int i = 0; i < 10; i ++) {
// direct << < OC / CPB, WIDTH >> > ((__half2 * )
// d_transposed_input, (__half2 * )
// d_kernel_KFFC, d_output);
// }
hipEventRecord(stop);
hipEventSynchronize(stop);
hipProfilerStop();
hipEventElapsedTime(&time,start,stop);
std::cout << "direct used " << time / 10<< std::endl;
hipDeviceSynchronize();
float* h_direct_output = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
float* h_direct_output_CHW = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
__half * h_direct_output_h = (__half *)malloc(output_bytes);
hipMemcpy(h_direct_output_h, d_output, output_bytes, hipMemcpyDeviceToHost);
half2float(h_direct_output_h,h_direct_output,output_bytes / 2);
#else
hipMemcpy(h_direct_output, d_output, output_bytes, hipMemcpyDeviceToHost);
#endif
hipDeviceSynchronize();
// direct writes HWC; transpose back to CHW for comparison/saving.
transpose(h_direct_output, h_direct_output_CHW, IMAGE_DIM * IMAGE_DIM, OC);
std::cout << h_direct_output_CHW[0] << std::endl;
cnpy::npy_save("direct_output.npy",&h_direct_output_CHW[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
hipFree(d_output);
hipMalloc(&d_output, output_bytes);
hipMemset(d_output, 0, output_bytes);
// Run the driver-API kernel loaded from the cubin.
runKernel(reinterpret_cast<hipDeviceptr_t>(d_input),reinterpret_cast<hipDeviceptr_t>(d_residual), reinterpret_cast<hipDeviceptr_t>(d_output));
float* h_output_kernel = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
__half * h_output_kernel_h = (__half *)malloc(output_bytes);
hipMemcpy(h_output_kernel_h, d_output, output_bytes, hipMemcpyDeviceToHost);
half2float(h_output_kernel_h,h_output_kernel,output_bytes/2);
#else
hipMemcpy(h_output_kernel, d_output, output_bytes, hipMemcpyDeviceToHost);
#endif
cnpy::npy_save("kernel_output.npy",&h_output_kernel[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
std::cout << h_output_kernel[0] << std::endl;
std::cout << "Difference: " << get_diff(h_output,h_output_kernel,OC * IMAGE_DIM * IMAGE_DIM) << std::endl;
// Do something with h_output ...
// FIX: h_output was allocated with malloc(); freeing it with delete[] is
// undefined behavior.
free(h_output);
hipFree(d_kernel);
hipFree(d_input);
hipFree(d_output);
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
| 77706865c4c32b41b39c695199760f7181671fd1.cu | #include <cnpy.h>
#include <vector>
#include <cuda.h>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <cublas_v2.h>
#include <time.h>
#include <cuda_profiler_api.h>
#include <cooperative_groups.h>
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#define BATCH_SIZE 1
#define FILTER_X 3
#define FILTER_Y 3
#define WITH_RELU 0
//#define IC 32 //128
//#define OC 32 //128
//#define IMAGE_DIM 14 //28
#define A_dim OC
#define B_dim IC * FILTER_X * FILTER_Y
#ifndef HALF
#define HALF 0
#endif
#if HALF
#define C_dim (IMAGE_DIM * IMAGE_DIM / 2)
#else
#define C_dim IMAGE_DIM * IMAGE_DIM
#endif
//#define Tsx 3
#define Tsy 1
#define Tsz (C_dim / C_Blocks)
#define Fx 1
#define Fy (Tsz/Fx)
#define Usy (Tsy * Fy) // this better be a multiple of 2, ideally 32
#define Gsy (Usy)
//#define Gx 36
#define Block_size (Gy * Gsy)
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process when a CUDA driver-API call fails. The macro above
// captures the call site (__FILE__/__LINE__); the CUresult is printed as a raw
// numeric code.
// NOTE(review): the message has no trailing '\n', so consecutive diagnostics
// run together on one line.
inline void __checkCudaErrors( CUresult err, const char *file, const int line )
{
if( CUDA_SUCCESS != err) {
fprintf(stderr,
"CUDA Driver API error = %04d from file <%s>, line %i.",
err, file, line );
exit(-1);
}
}
CUdevice device;
CUcontext context;
CUmodule module;
CUfunction function;
size_t totalGlobalMem;
char *module_file = (char*) "testing_conv.cubin";
#if RESIDUAL
char *kernel_name = (char*) "_Z2mmPKfS0_Pf";
#else
char *kernel_name = (char*) "_Z2mmPKfPf";
#endif
// --- functions -----------------------------------------------------------
// Initialize the CUDA driver API: pick device 0, create a context, load the
// precompiled cubin (module_file) and resolve the mangled kernel symbol
// (kernel_name) into the file-scope globals device/context/module/function.
// Exits the process on any failure.
// NOTE(review): cuDeviceComputeCapability and cuCtxDetach are deprecated
// driver entry points; kept as-is to preserve behavior.
void initCUDA()
{
int deviceCount = 0;
CUresult err = cuInit(0);
int major = 0, minor = 0;
if (err == CUDA_SUCCESS)
checkCudaErrors(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "Error: no devices supporting CUDA");
exit(-1);
}
// get first CUDA device
checkCudaErrors(cuDeviceGet(&device, 0));
char name[100];
cuDeviceGetName(name, 100, device);
printf("> Using device 0: %s", name);
// get compute capabilities and the devicename
checkCudaErrors( cuDeviceComputeCapability(&major, &minor, device) );
printf("> GPU Device has SM %d.%d compute capability", major, minor);
err = cuCtxCreate(&context, 0, device);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error initializing the CUDA context.");
cuCtxDetach(context);
exit(-1);
}
err = cuModuleLoad(&module, module_file);
std::cout << err << std::endl;
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error loading the module %s", module_file);
cuCtxDetach(context);
exit(-1);
}
err = cuModuleGetFunction(&function, module, kernel_name);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error getting kernel function %s", kernel_name);
cuCtxDetach(context);
exit(-1);
}
}
// Launch the cubin-loaded kernel (resolved in initCUDA) 100 times on a
// grid of A_Blocks x C_Blocks blocks of Block_size threads, and print the
// average per-launch wall time measured with driver events.
// d_residual is only passed to the kernel when compiled with RESIDUAL.
// NOTE(review): the 100-iteration count is hard-coded and must match the
// divisor in the printout below.
void runKernel(CUdeviceptr d_BC, CUdeviceptr d_residual, CUdeviceptr d_AC)
{
#if RESIDUAL
void *args[3] = { &d_BC, &d_residual, &d_AC};
#else
void *args[2] = { &d_BC, &d_AC};
#endif
// grid for kernel: <<<N, 1>>>
CUevent start, stop;
cuEventCreate(&start,0);
cuEventCreate(&stop,0);
cuEventRecord(start,0);
std::cout << A_Blocks << " " << C_Blocks << " " << Block_size << std::endl;
for(int i = 0;i < 100; i ++){
checkCudaErrors( cuLaunchKernel(function, A_Blocks, C_Blocks, 1, // Nx1x1 blocks
Block_size, 1, 1, // 1x1x1 threads
0, 0, args, 0) );
}
cuEventRecord(stop,0);
cuEventSynchronize(stop);
float time;
cuEventElapsedTime(&time,start,stop);
cuEventDestroy(start);
cuEventDestroy(stop);
std::cout << "kernel used " << time / 100.0 << std::endl;
}
// Convert n single-precision values in `in` to half precision in `out`.
void float2half(float * in, __half * out, int n) {
    int idx = 0;
    while (idx < n) {
        out[idx] = __float2half(in[idx]);
        ++idx;
    }
}
// Widen n half-precision values in `in` back to single precision in `out`.
void half2float(__half * in, float * out, int n) {
    int idx = 0;
    while (idx < n) {
        out[idx] = __half2float(in[idx]);
        ++idx;
    }
}
// Sum of absolute element-wise differences between t1 and t2 (L1 distance).
// Returns 0 for size <= 0.
float get_diff(float * t1, float * t2, int size)
{
    float diff = 0.0f;
    for (int i = 0; i < size; i ++) {
        // fabsf: the original used unqualified abs(), which may bind to the
        // integer ::abs overload and silently truncate fractional differences.
        diff += fabsf(t1[i] - t2[i]);
        // if(t1[i] - t2[i] > 3){
        //     std::cout << i << std::endl;
        // }
    }
    return diff;
}
// Transpose a row-major dim1 x dim2 matrix `input` into `output`,
// which becomes dim2 x dim1 (also row-major).
void transpose(float * input, float * output, int dim1, int dim2) {
    for (int row = 0; row < dim1; row++) {
        for (int col = 0; col < dim2; col++) {
            output[col * dim1 + row] = input[row * dim2 + col];
        }
    }
}
// Fill data[0..n) with pseudo-random integers in [0, 9] stored as floats.
// Uses the C library rand(); call srand() first for reproducibility.
void fillvector(float *data, int n) {
    int k = 0;
    while (k < n) {
        data[k] = float(rand() % 10);
        ++k;
    }
}
// filter shape in OC, K, K, IC
// input shape in H, W, IC
// output shape in H, W, OC
// each block is responsible for a number of output channels
#define CPB 2
#define WIDTH (IC / 2) // aka block size
// Reduce `val` across the calling warp (shuffle tree) and have lane 0 store
// the warp's sum into array[index].
// NOTE(review): despite the name, the final store is a plain overwrite, NOT an
// atomicAdd -- safe only while each warp writes a distinct index, as the
// callers in this file do via `threadIdx.x / 32`.
// Assumes all 32 lanes of the warp participate (full-warp mask).
__device__ inline void atomicAddWarpToArray(__half2 *array, int index, __half2 val)
{
    const unsigned full_mask = 0xffffffffu;  // explicit mask instead of -1
    val += __shfl_down_sync(full_mask, val, 16);
    val += __shfl_down_sync(full_mask, val, 8);
    val += __shfl_down_sync(full_mask, val, 4);
    val += __shfl_down_sync(full_mask, val, 2);
    val += __shfl_down_sync(full_mask, val, 1);
    if(threadIdx.x % 32 == 0) {
        array[index] = val;  // lane 0 publishes the warp partial
    }
}
// Reduce `val` across the calling warp with a shuffle tree; the total is valid
// in lane 0 (other lanes hold partial sums).
// Assumes all 32 lanes participate (full-warp mask).
__device__ inline __half2 atomicAddWarp(__half2 val)
{
    const unsigned full_mask = 0xffffffffu;  // explicit mask instead of -1
    val += __shfl_down_sync(full_mask, val, 16);
    val += __shfl_down_sync(full_mask, val, 8);
    val += __shfl_down_sync(full_mask, val, 4);
    val += __shfl_down_sync(full_mask, val, 2);
    val += __shfl_down_sync(full_mask, val, 1);
    return val;
}
// Direct 3x3 zero-padded ("same") convolution in half precision.
// Each block computes CPB consecutive output channels starting at
// blockIdx.x * CPB; each thread owns one __half2 lane, i.e. two input
// channels (the commented-out host launch is direct<<<OC / CPB, WIDTH>>>,
// WIDTH = IC / 2).
// Presumed layouts (from the host code in this file -- TODO confirm):
//   input  : HW-major with channels innermost (transposed_input),
//   filter : [OC][FILTER_X][FILTER_Y][IC]    (filter_KFFC),
//   output : HW-major with OC innermost.
// NOTE(review): assumes blockDim.x == WIDTH, WIDTH a multiple of 32, and
// WIDTH / 32 <= 32 so reduction_buffer holds one partial per warp.
__global__ void direct(__half2* input, __half2 * filter, __half * output)
{
int my_oc_start = blockIdx.x * CPB;
// Per-thread register copy of the 3x3 filter taps for each of the block's
// CPB output channels (one __half2 = two input channels per thread).
register __half2 my_kernel[CPB][FILTER_X][FILTER_Y];
// One slot per warp for the two-stage cross-warp reduction.
__shared__ __half2 reduction_buffer[32];
//__shared__ __half2 coalesce_buffer[]
if(threadIdx.x < 32)
{
reduction_buffer[threadIdx.x] = __float2half2_rn(0.0f);
}
__syncthreads();
// Stage this block's filter taps into registers.
for(int oc = 0; oc < CPB; oc ++)
{
int filter_offset = my_oc_start * WIDTH * FILTER_X * FILTER_Y + oc * WIDTH * FILTER_X * FILTER_Y;
my_kernel[oc][0][0] = filter[filter_offset + threadIdx.x];
my_kernel[oc][0][1] = filter[filter_offset + WIDTH + threadIdx.x];
my_kernel[oc][0][2] = filter[filter_offset + WIDTH * 2 + threadIdx.x];
my_kernel[oc][1][0] = filter[filter_offset + WIDTH * 3 + threadIdx.x];
my_kernel[oc][1][1] = filter[filter_offset + WIDTH * 4 + threadIdx.x];
my_kernel[oc][1][2] = filter[filter_offset + WIDTH * 5 + threadIdx.x];
my_kernel[oc][2][0] = filter[filter_offset + WIDTH * 6 + threadIdx.x];
my_kernel[oc][2][1] = filter[filter_offset + WIDTH * 7 + threadIdx.x];
my_kernel[oc][2][2] = filter[filter_offset + WIDTH * 8 + threadIdx.x];
}
__half2 intermediate[CPB];
// Row 0: the row above the image is zero padding, so only filter rows 1-2
// contribute. A 3-column window is kept in registers and shifted left each
// iteration; columns -1 and IMAGE_DIM are zero padding.
__half2 input_10 = __float2half2_rn(0.0f);
__half2 input_11 = __float2half2_rn(0.0f);
__half2 input_12 = input[threadIdx.x];
__half2 input_20 = __float2half2_rn(0.0f);
__half2 input_21 = __float2half2_rn(0.0f);
__half2 input_22 = input[IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(col + 1) * WIDTH + threadIdx.x];
input_20 = input_21;
input_21 = input_22;
input_22 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
// Per-thread partial dot product over this thread's two input channels.
intermediate[oc] = __hmul2(input_10,my_kernel[oc][1][0]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_20 , my_kernel[oc][2][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_21 , my_kernel[oc][2][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_22 , my_kernel[oc][2][2],intermediate[oc]);
// Stage 1: warp-level reduce; stage 2: first warp reduces the partials.
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__syncthreads();
// NOTE(review): no barrier between reading reduction_buffer below and the
// next iteration's store into it; a second __syncthreads() after the read
// would rule out a write-after-read race.
__half2 result;
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
// Collapse the __half2 lane pair into the final scalar output value.
output[col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
// Interior rows: all three filter rows contribute.
for(int row = 1; row < IMAGE_DIM - 1; row ++) {
__half2 input_00 = __float2half2_rn(0.0f);
__half2 input_01 = __float2half2_rn(0.0f);
__half2 input_02 = input[(row - 1) * IMAGE_DIM * WIDTH + threadIdx.x];
__half2 input_10 = __float2half2_rn(0.0f);
__half2 input_11 = __float2half2_rn(0.0f);
__half2 input_12 = input[(row ) * IMAGE_DIM * WIDTH + threadIdx.x];
__half2 input_20 = __float2half2_rn(0.0f);
__half2 input_21 = __float2half2_rn(0.0f);
__half2 input_22 = input[(row + 1) * IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_00 = input_01;
input_01 = input_02;
input_02 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row - 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row ) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_20 = input_21;
input_21 = input_22;
input_22 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row + 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
intermediate[oc] = __hmul2(input_00 ,my_kernel[oc][0][0]);
intermediate[oc] = __hfma2(input_01 , my_kernel[oc][0][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_02 , my_kernel[oc][0][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_10 , my_kernel[oc][1][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_20 , my_kernel[oc][2][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_21 , my_kernel[oc][2][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_22 , my_kernel[oc][2][2],intermediate[oc]);
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__syncthreads();
__half2 result;
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
output[row * IMAGE_DIM * OC + col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
}
// Last row: the row below the image is zero padding, so only filter rows 0-1
// contribute.
int row = IMAGE_DIM - 1;
__half2 input_00 = __float2half2_rn(0.0f);
__half2 input_01 = __float2half2_rn(0.0f);
__half2 input_02 = input[(row - 1) * IMAGE_DIM * WIDTH + threadIdx.x];
input_10 = __float2half2_rn(0.0f);
input_11 = __float2half2_rn(0.0f);
input_12 = input[(row ) * IMAGE_DIM * WIDTH + threadIdx.x];
for (int col = 0; col < IMAGE_DIM; col++) {
input_00 = input_01;
input_01 = input_02;
input_02 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row - 1) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
input_10 = input_11;
input_11 = input_12;
input_12 = (col == (IMAGE_DIM - 1)) ? __float2half2_rn(0.0f): input[(row ) * IMAGE_DIM * WIDTH + (col + 1) * WIDTH + threadIdx.x];
for (int oc = 0; oc < CPB; oc++) {
intermediate[oc] = __hmul2(input_00 , my_kernel[oc][0][0]);
intermediate[oc] = __hfma2(input_01 , my_kernel[oc][0][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_02 , my_kernel[oc][0][2],intermediate[oc]);
intermediate[oc] = __hfma2(input_10 , my_kernel[oc][1][0],intermediate[oc]);
intermediate[oc] = __hfma2(input_11 , my_kernel[oc][1][1],intermediate[oc]);
intermediate[oc] = __hfma2(input_12 , my_kernel[oc][1][2],intermediate[oc]);
atomicAddWarpToArray(reduction_buffer, threadIdx.x / 32, intermediate[oc]);
__half2 result;
__syncthreads();
if (threadIdx.x < WIDTH / 32) {
result = atomicAddWarp(reduction_buffer[threadIdx.x]);
}
if (threadIdx.x == 0) {
output[row * IMAGE_DIM * OC + col * OC + my_oc_start + oc] = __high2half(result) + __low2half(result);
}
}
}
}
// Benchmark driver: runs a 3x3 convolution via cuDNN (baseline), an optional
// hand-written "direct" kernel, and a JIT-compiled kernel (runKernel), then
// dumps all outputs to .npy files and prints the max difference.
// Expects filter.npy, filter_KFFC.npy, input.npy (and residual.npy when
// RESIDUAL is set) in the working directory.
int main(int argc, char const *argv[]) {
  cudnnHandle_t cudnn;
  checkCUDNN(cudnnCreate(&cudnn));
  // fillvector((float*)input, IC * IMAGE_DIM * IMAGE_DIM);
  // fillvector((float*)kernel, IC * OC * FILTER_Y * FILTER_X);

  // ---- Load tensors from disk and validate their layout --------------------
  cnpy::NpyArray arr = cnpy::npy_load("filter.npy");
  float * kernel = arr.data<float>();
  // BUGFIX: these asserts used `=` (assignment) instead of `==`, so they
  // always passed and silently overwrote word_size.
  assert(arr.word_size == sizeof(float));
  assert(arr.shape.size()==2 && arr.shape[0] == OC && arr.shape[1] == IC * FILTER_X * FILTER_Y); //reference kernel for cudnn
  cnpy::NpyArray arr2 = cnpy::npy_load("filter_KFFC.npy");
  float * kernel_KFFC = arr2.data<float>();
  assert(arr2.word_size == sizeof(float));
  assert(arr2.shape.size()==4 && arr2.shape[0] == OC && arr2.shape[1] == FILTER_X && arr2.shape[2] == FILTER_Y && arr2.shape[3] == IC); //reference kernel for cudnn
  cnpy::NpyArray arr1 = cnpy::npy_load("input.npy");
  float * input = arr1.data<float>();
  assert(arr1.word_size == sizeof(float));
  assert(arr1.shape.size()==3 && arr1.shape[0] == IC && arr1.shape[1] == IMAGE_DIM && arr1.shape[2] == IMAGE_DIM); //reference kernel for cudnn
#if RESIDUAL
  cnpy::NpyArray arr3 = cnpy::npy_load("residual.npy");
  float * residual = arr3.data<float>();
  assert(arr3.word_size == sizeof(float));
  // BUGFIX: this check previously validated arr1 (the input) instead of arr3
  // (the residual), so a malformed residual.npy went undetected.
  assert(arr3.shape.size()==3 && arr3.shape[0] == OC && arr3.shape[1] == IMAGE_DIM && arr3.shape[2] == IMAGE_DIM);
  __half * residual_h;
  residual_h = (__half *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 2);
  float2half(residual,residual_h,OC * IMAGE_DIM * IMAGE_DIM);
  std::cout << residual[0] << std::endl;
#endif
  // CHW -> HWC transpose of the input for the direct kernel.
  auto transposed_input = (float * ) malloc(IC * IMAGE_DIM * IMAGE_DIM * 4);
  transpose(input, transposed_input, IC , IMAGE_DIM * IMAGE_DIM);
  initCUDA();
#if HALF
  // Half-precision copies of all host tensors (2 bytes per element).
  __half * kernel_h, *input_h, *kernel_KFFC_h, *transposed_input_h;
  kernel_h = (__half *)malloc(IC * OC * FILTER_X * FILTER_Y *2);
  kernel_KFFC_h = (__half *)malloc(IC * OC * FILTER_X * FILTER_Y * 2);
  input_h = (__half *)malloc(IC * IMAGE_DIM * IMAGE_DIM *2);
  transposed_input_h = (__half *) malloc(IC * IMAGE_DIM * IMAGE_DIM * 2);
  float2half(transposed_input, transposed_input_h, IC * IMAGE_DIM * IMAGE_DIM);
  float2half(kernel, kernel_h, IC * OC * FILTER_X * FILTER_Y);
  float2half(kernel_KFFC,kernel_KFFC_h, IC * OC * FILTER_X * FILTER_Y);
  float2half(input, input_h, IC * IMAGE_DIM * IMAGE_DIM);
#endif
  // ---- cuDNN descriptors ---------------------------------------------------
  cudnnTensorDescriptor_t input_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
                                        /*format=*/CUDNN_TENSOR_NCHW,
#if HALF
                                        CUDNN_DATA_HALF,
#else
                                        /*dataType=*/CUDNN_DATA_FLOAT,
#endif
                                        /*batch_size=*/BATCH_SIZE,
                                        /*channels=*/IC,
                                        /*image_height=*/IMAGE_DIM,
                                        /*image_width=*/IMAGE_DIM));
  cudnnFilterDescriptor_t kernel_descriptor;
  checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
  checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
#if HALF
                                        CUDNN_DATA_HALF,
#else
                                        /*dataType=*/CUDNN_DATA_FLOAT,
#endif
                                        /*format=*/CUDNN_TENSOR_NCHW,
                                        /*out_channels=*/OC,
                                        /*in_channels=*/IC,
                                        /*kernel_height=*/FILTER_X,
                                        /*kernel_width=*/FILTER_Y));
  cudnnTensorDescriptor_t output_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
                                        /*format=*/CUDNN_TENSOR_NCHW,
#if HALF
                                        CUDNN_DATA_HALF,
#else
                                        /*dataType=*/CUDNN_DATA_FLOAT,
#endif
                                        /*batch_size=*/BATCH_SIZE,
                                        /*channels=*/OC,
                                        /*image_height=*/IMAGE_DIM,
                                        /*image_width=*/IMAGE_DIM));
  cudnnConvolutionDescriptor_t convolution_descriptor;
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
                                             /*pad_height=*/1,
                                             /*pad_width=*/1,
                                             /*vertical_stride=*/1,
                                             /*horizontal_stride=*/1,
                                             /*dilation_height=*/1,
                                             /*dilation_width=*/1,
                                             /*mode=*/CUDNN_CROSS_CORRELATION,
#if HALF
                                             CUDNN_DATA_HALF
#else
                                             /*dataType=*/CUDNN_DATA_FLOAT
#endif
                                             ));
  // NOTE(review): cudnnGetConvolutionForwardAlgorithm was removed in cuDNN 8;
  // migrating to cudnnGetConvolutionForwardAlgorithm_v7 would be needed there.
  cudnnConvolutionFwdAlgo_t convolution_algorithm;
  checkCUDNN(
      cudnnGetConvolutionForwardAlgorithm(cudnn,
                                          input_descriptor,
                                          kernel_descriptor,
                                          convolution_descriptor,
                                          output_descriptor,
                                          CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
                                          /*memoryLimitInBytes=*/0,
                                          &convolution_algorithm));
  std::cout << "picked algorithm: " << convolution_algorithm << std::endl;
  size_t workspace_bytes = 0;
  //convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
                                                     input_descriptor,
                                                     kernel_descriptor,
                                                     convolution_descriptor,
                                                     output_descriptor,
                                                     convolution_algorithm,
                                                     &workspace_bytes));
  std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
            << std::endl;
  // ---- Device buffers (sizes depend on element width) ----------------------
#if HALF
  int input_bytes = 1 * IC * IMAGE_DIM * IMAGE_DIM * 2;
  int output_bytes = 1 * OC * IMAGE_DIM * IMAGE_DIM * 2;
  int kernel_bytes = IC * OC * FILTER_Y * FILTER_X * 2;
  __half * d_residual{nullptr};
  std::cout << RESIDUAL << std::endl;
#if RESIDUAL
  cudaMalloc((void **)&d_residual,output_bytes) ;
  cudaMemcpy(d_residual, residual_h,output_bytes,cudaMemcpyHostToDevice);
#endif
  __half * d_input{nullptr};
  cudaMalloc((void **)&d_input, input_bytes);
  cudaMemcpy(d_input, input_h, input_bytes, cudaMemcpyHostToDevice);
  __half * d_output{nullptr};
  cudaMalloc(&d_output, output_bytes);
  cudaMemset(d_output, 0, output_bytes);
  __half * d_transposed_input{nullptr};
  cudaMalloc((void **)&d_transposed_input, input_bytes);
  cudaMemcpy(d_transposed_input, transposed_input_h, input_bytes, cudaMemcpyHostToDevice);
  __half * d_kernel{nullptr};
  cudaMalloc(&d_kernel, kernel_bytes);
  cudaMemcpy(d_kernel, kernel_h, kernel_bytes, cudaMemcpyHostToDevice);
  __half * d_kernel_KFFC{nullptr};
  cudaMalloc(&d_kernel_KFFC, kernel_bytes);
  cudaMemcpy(d_kernel_KFFC, kernel_KFFC_h, kernel_bytes, cudaMemcpyHostToDevice);
#else
  int input_bytes = 1 * IC * IMAGE_DIM * IMAGE_DIM * sizeof(float);
  int output_bytes = 1 * OC * IMAGE_DIM * IMAGE_DIM * sizeof(float);
  int kernel_bytes = IC * OC * FILTER_Y * FILTER_X * sizeof(float);
  float* d_input{nullptr};
  cudaMalloc((void **)&d_input, input_bytes);
  cudaMemcpy(d_input, input, input_bytes, cudaMemcpyHostToDevice);
  float* d_output{nullptr};
  cudaMalloc(&d_output, output_bytes);
  cudaMemset(d_output, 0, output_bytes);
  float* d_kernel{nullptr};
  cudaMalloc(&d_kernel, kernel_bytes);
  cudaMemcpy(d_kernel, kernel, kernel_bytes, cudaMemcpyHostToDevice);
  float * d_residual{nullptr};
#endif
  void* d_workspace{nullptr};
  cudaMalloc(&d_workspace, workspace_bytes);
  const float alpha = 1, beta = 0;
  cudaDeviceSynchronize();
  // ---- Baseline: cuDNN convolution, 10 warmup + 10 timed iterations --------
  for(int i = 0; i < 10; i ++) {
  checkCUDNN(cudnnConvolutionForward(cudnn,
                                     &alpha,
                                     input_descriptor,
                                     d_input,
                                     kernel_descriptor,
                                     d_kernel,
                                     convolution_descriptor,
                                     convolution_algorithm,
                                     d_workspace,
                                     workspace_bytes,
                                     &beta,
                                     output_descriptor,
                                     d_output));
  }
  cudaEvent_t start,stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for(int i = 0; i < 10; i ++) {
  checkCUDNN(cudnnConvolutionForward(cudnn,
                                     &alpha,
                                     input_descriptor,
                                     d_input,
                                     kernel_descriptor,
                                     d_kernel,
                                     convolution_descriptor,
                                     convolution_algorithm,
                                     d_workspace,
                                     workspace_bytes,
                                     &beta,
                                     output_descriptor,
                                     d_output));
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  float time;
  cudaEventElapsedTime(&time,start,stop);
  std::cout << "baseline used " << time / 10 << std::endl;
  cudaDeviceSynchronize();
  float* h_output = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
  __half * h_output_h = (__half *)malloc(output_bytes);
  cudaMemcpy(h_output_h, d_output, output_bytes, cudaMemcpyDeviceToHost);
  half2float(h_output_h,h_output,output_bytes / 2);
#else
  cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
#endif
  cudaDeviceSynchronize();
  std::cout << h_output[0] << std::endl;
  cnpy::npy_save("cudnn_output.npy",&h_output[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
  // ---- "direct" kernel (currently disabled; timing section kept) -----------
  cudaFree(d_output);
  cudaMalloc(&d_output, output_bytes);
  cudaMemset(d_output, 0, output_bytes);
  cudaProfilerStart();
  cudaEventRecord(start);
  // for(int i = 0; i < 10; i ++) {
  // direct << < OC / CPB, WIDTH >> > ((__half2 * )
  // d_transposed_input, (__half2 * )
  // d_kernel_KFFC, d_output);
  // }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaProfilerStop();
  cudaEventElapsedTime(&time,start,stop);
  std::cout << "direct used " << time / 10<< std::endl;
  cudaDeviceSynchronize();
  float* h_direct_output = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
  float* h_direct_output_CHW = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
  __half * h_direct_output_h = (__half *)malloc(output_bytes);
  cudaMemcpy(h_direct_output_h, d_output, output_bytes, cudaMemcpyDeviceToHost);
  half2float(h_direct_output_h,h_direct_output,output_bytes / 2);
#else
  cudaMemcpy(h_direct_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
#endif
  cudaDeviceSynchronize();
  transpose(h_direct_output, h_direct_output_CHW, IMAGE_DIM * IMAGE_DIM, OC);
  std::cout << h_direct_output_CHW[0] << std::endl;
  cnpy::npy_save("direct_output.npy",&h_direct_output_CHW[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
  // ---- JIT kernel via runKernel --------------------------------------------
  cudaFree(d_output);
  cudaMalloc(&d_output, output_bytes);
  cudaMemset(d_output, 0, output_bytes);
  runKernel(reinterpret_cast<CUdeviceptr>(d_input),reinterpret_cast<CUdeviceptr>(d_residual), reinterpret_cast<CUdeviceptr>(d_output));
  float* h_output_kernel = (float *)malloc(OC * IMAGE_DIM * IMAGE_DIM * 4);
#if HALF
  __half * h_output_kernel_h = (__half *)malloc(output_bytes);
  cudaMemcpy(h_output_kernel_h, d_output, output_bytes, cudaMemcpyDeviceToHost);
  half2float(h_output_kernel_h,h_output_kernel,output_bytes/2);
#else
  cudaMemcpy(h_output_kernel, d_output, output_bytes, cudaMemcpyDeviceToHost);
#endif
  cnpy::npy_save("kernel_output.npy",&h_output_kernel[0],{OC, IMAGE_DIM, IMAGE_DIM},"w");
  std::cout << h_output_kernel[0] << std::endl;
  std::cout << "Difference: " << get_diff(h_output,h_output_kernel,OC * IMAGE_DIM * IMAGE_DIM) << std::endl;
  // Do something with h_output ...
  // BUGFIX: h_output was allocated with malloc(), so it must be released with
  // free(), not delete[] (mismatched allocator/deallocator is UB).
  free(h_output);
  free(h_output_kernel);
  free(h_direct_output);
  free(h_direct_output_CHW);
  free(transposed_input);
  cudaFree(d_kernel);
  cudaFree(d_input);
  cudaFree(d_output);
  cudaFree(d_workspace);
  cudnnDestroyTensorDescriptor(input_descriptor);
  cudnnDestroyTensorDescriptor(output_descriptor);
  cudnnDestroyFilterDescriptor(kernel_descriptor);
  cudnnDestroyConvolutionDescriptor(convolution_descriptor);
  cudnnDestroy(cudnn);
}
|
d60e75880495d4159cc889aab1d646030cbe6d3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
// Element-wise vector addition: c[x] = a[x] + b[x], one element per block
// (indexing uses blockIdx.x only).
// FIX: the *Len0 length parameters were passed but never used; guard against
// out-of-bounds access when the grid is larger than the arrays.
extern "C" __global__ void addVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0 && x < cLen0)
		c[x] = a[x] + b[x];
}
// Element-wise vector subtraction: c[x] = a[x] - b[x], one element per block
// (indexing uses blockIdx.x only).
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void subVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0 && x < cLen0)
		c[x] = a[x] - b[x];
}
// Scalar multiply: b[x] = a[x] * n, one element per block
// (indexing uses blockIdx.x only).
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void mulVector(int* a, int aLen0, int* b, int bLen0, int n)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0)
		b[x] = a[x] * n;
}
// Scalar divide: b[x] = a[x] / n, one element per block
// (indexing uses blockIdx.x only). Caller must ensure n != 0.
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void divVector(int* a, int aLen0, int* b, int bLen0, int n)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0)
		b[x] = a[x] / n;
}
| d60e75880495d4159cc889aab1d646030cbe6d3b.cu | #include <curand_kernel.h>
// Element-wise vector addition: c[x] = a[x] + b[x], one element per block
// (indexing uses blockIdx.x only).
// FIX: the *Len0 length parameters were passed but never used; guard against
// out-of-bounds access when the grid is larger than the arrays.
extern "C" __global__ void addVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0 && x < cLen0)
		c[x] = a[x] + b[x];
}
// Element-wise vector subtraction: c[x] = a[x] - b[x], one element per block
// (indexing uses blockIdx.x only).
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void subVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0 && x < cLen0)
		c[x] = a[x] - b[x];
}
// Scalar multiply: b[x] = a[x] * n, one element per block
// (indexing uses blockIdx.x only).
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void mulVector(int* a, int aLen0, int* b, int bLen0, int n)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0)
		b[x] = a[x] * n;
}
// Scalar divide: b[x] = a[x] / n, one element per block
// (indexing uses blockIdx.x only). Caller must ensure n != 0.
// FIX: bounds guard added using the previously-unused length parameters.
extern "C" __global__ void divVector(int* a, int aLen0, int* b, int bLen0, int n)
{
	int x = blockIdx.x;
	if (x < aLen0 && x < bLen0)
		b[x] = a[x] / n;
}
|
180070e61e47fd6445296163cd25966478d40cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/hip/TensorTopK.h>
#include <ATen/core/TensorBase.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/ScanUtils.cuh>
#include <ATen/hip/AsmUtils.cuh>
#include <ATen/hip/DeviceUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace sbtopk { // single_block_topk
// Binary sum functor; passed as the scan operator to the block-wide
// exclusive prefix sums in gatherTopK.
template <typename T>
struct AddOp {
  __device__ __forceinline__ T operator()(T const &lhs, T const &rhs) {
    T sum = lhs + rhs;
    return sum;
  }
};
// One thread block per input slice. Determines the k-th value of the slice
// (either via radixSelect, or read from `kthValues` when WithKthValues==true,
// i.e. when the multi-block path precomputed it), then compacts every top-k
// element and its index into the output. Strictly-better elements are written
// first; ties with the k-th value are filled in first-seen order via a second
// pass. Output order within the top-k is therefore input order, not sorted.
template <typename T, typename IndexType, int Dim, bool WithKthValues>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
                           IndexType inputSliceSize,
                           IndexType outputSliceSize, // aka `k`
                           bool largest,
                           IndexType numInputSlices,
                           IndexType inputWithinSliceStride,
                           at::cuda::detail::TensorInfo<T, IndexType> topK,
                           IndexType topKWithinSliceStride,
                           at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
                           IndexType indicesWithinSliceStride,
                           T* kthValues) {
  // Indices are limited to integer fp precision, so counts can fit in
  // int32, regardless of IndexType
#if defined(USE_ROCM)
  __shared__ int smem[64];
#else
  __shared__ int smem[32]; // one per each warp, up to warp limit
#endif
  IndexType slice = getLinearBlockId<IndexType>();
  if (slice >= numInputSlices) {
    return;
  }
  // Find the start offset for our slice
  IndexType sliceStartIndex =
      at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
  IndexType topKSliceStartIndex =
      at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
  IndexType indicesSliceStartIndex =
      at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
  T* inputSliceStart = &input.data[sliceStartIndex];
  T* topKSliceStart = &topK.data[topKSliceStartIndex];
  int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
  // Find the k-th highest element in our input
  T topKValue;
  if (WithKthValues){
    topKValue = kthValues[slice];
  } else {
    topKValue = static_cast<T>(0);
    radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType>(
      inputSliceStart, outputSliceSize, largest,
      inputSliceSize, inputWithinSliceStride,
      smem, &topKValue);
  }
  const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
  // Every value that is strictly less/greater than `pattern`
  // (depending on sort dir) in sorted int format is in the top-K.
  // The top-K value itself might not be unique.
  //
  // Since there are a variable number of elements that we see that
  // are within the top-k, we don't know at what index to write out
  // the resulting values.
  // In order to get this, we perform an exclusive prefix sum of
  // `hasTopK`. This will return the resulting index into which we
  // need to write the result, if a thread has a result.
  // All threads need to participate in the loop and the prefix sum,
  // but not necessarily in the load; hence loop bounds being rounded
  // up to a multiple of the block dim.
  IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x);
  IndexType writeIndexStart = 0;
  // Pass 1: write out elements strictly better than the k-th value.
  for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
    bool inRange = (i < inputSliceSize);
    T v =
      inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
    const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
    bool hasTopK;
    if (largest) {
      hasTopK = inRange && (convertedV > topKConverted);
    } else {
      hasTopK = inRange && (convertedV < topKConverted);
    }
    int index;
    int carry;
    at::cuda::exclusiveBinaryPrefixScan<int, true>(
        smem, hasTopK, &index, &carry, AddOp<int>());
    if (hasTopK) {
      int writeIndex = writeIndexStart + index;
      CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
      IndexType topKOffset = writeIndex * topKWithinSliceStride;
      IndexType indexOffset = writeIndex * indicesWithinSliceStride;
      topKSliceStart[topKOffset] = v;
      indicesSliceStart[indexOffset] = i;
    }
    writeIndexStart += carry;
  }
  // We need to fill in the rest with actual == top-K values.
  // The number that we need is outputSliceSize -
  // writeIndexStart. There might be more than that number available,
  // in which case we have to choose the first seen set. We do this
  // via a prefix sum to calculate indices for writing results.
  CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
  IndexType topKRemaining = (outputSliceSize - writeIndexStart);
  // Pass 2: fill remaining slots with elements equal to the k-th value.
  for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
    bool inRange = (i < inputSliceSize);
    T v =
      inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
    const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
    bool hasTopK = inRange && (convertedV == topKConverted);
    int index;
    int carry;
    at::cuda::exclusiveBinaryPrefixScan<int, true>(
        smem, hasTopK, &index, &carry, AddOp<int>());
    if (hasTopK && index < topKRemaining) {
      int writeIndex = writeIndexStart + index;
      CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
      IndexType topKOffset = writeIndex * topKWithinSliceStride;
      IndexType indexOffset = writeIndex * indicesWithinSliceStride;
      topKSliceStart[topKOffset] = v;
      indicesSliceStart[indexOffset] = i;
    }
    // All top-k slots are filled; remaining iterations have nothing to write.
    if (carry >= topKRemaining) {
      break;
    }
    topKRemaining -= carry;
    writeIndexStart += carry;
  }
};
// Host-side launcher for the single-block-per-slice top-k path: one block per
// slice, block size rounded up to a warp multiple and capped at 1024 threads.
// kthValues is passed as nullptr, so gatherTopK runs its own radixSelect.
template <typename T, typename IndexType, int Dim>
void launch(
    at::cuda::detail::TensorInfo<T, IndexType> input,
    IndexType inputSliceSize,
    IndexType outputSliceSize, // aka `k`
    bool largest,
    IndexType numInputSlices,
    IndexType inputWithinSliceStride,
    at::cuda::detail::TensorInfo<T, IndexType> topK,
    IndexType topKWithinSliceStride,
    at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
    IndexType indicesWithinSliceStride) {
  dim3 grid;
  TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk");
  int warp_size = at::cuda::warp_size();
  dim3 block(::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)warp_size) * (int64_t)warp_size, (int64_t)1024));
  hipLaunchKernelGGL(( gatherTopK<T, IndexType, Dim, /* WithKthValues= */false>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      input,
      inputSliceSize,
      outputSliceSize,
      largest,
      numInputSlices,
      inputWithinSliceStride,
      topK,
      topKWithinSliceStride,
      indices,
      indicesWithinSliceStride,
      nullptr);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
} // namespace sbtopk
namespace mbtopk { // multi_block_topk
constexpr int BLOCK_THREADS = 256;
// Over what radix we are selecting values
constexpr int RADIX_BITS = 8;
constexpr int RADIX_DIGITS = 1 << RADIX_BITS; // 2 ^ RADIX_BITS
constexpr int RADIX_MASK = (RADIX_DIGITS - 1);
static_assert(RADIX_DIGITS <= BLOCK_THREADS, "radixFindKthValues kernel requires RADIX_DIGITS <= BLOCK_THREADS");
// Writes `value` into every element of x[0..size). Uses a grid-stride loop,
// so any launch configuration covers the full range.
template <typename T, typename IndexType>
__global__ void fill(T* x, T value, IndexType size) {
  const IndexType stride = gridDim.x * blockDim.x;
  for (IndexType pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += stride) {
    x[pos] = value;
  }
}
// find the kth smallest value,
// for largest topk, k_to_find = slice_size - k + 1
//
// One radix pass: each block counts, for its chunk of a slice, how many
// candidate values fall into each of the 256 digit buckets at `current_bit`.
// The last block to finish a slice (tracked via `semaphores`) accumulates all
// per-block counts, prefix-sums them, picks the digit bucket containing the
// k-th value, and either narrows `desires`/`ks_to_find` for the next pass or,
// on the final pass (current_bit == 0), writes the result to `kthValues`.
template <typename T, typename IndexType, typename Bitwise, int Dim>
C10_LAUNCH_BOUNDS_1(BLOCK_THREADS)
__global__ void radixFindKthValues(
    at::cuda::detail::TensorInfo<T, IndexType> input,
    IndexType slice_size,
    IndexType* ks_to_find, // size: num_slices
    IndexType num_slices,
    IndexType withinSliceStride,
    int current_bit,
    int items_per_thread,
    IndexType blocks_per_slice,
    Bitwise desiredMask,
    // outputs
    uint32_t* semaphores, // size: num_slices
    Bitwise* desires, // size: num_slices
    IndexType* counts, // size: num_slices * blocks_per_slice * radix_digits
    T* kthValues // size: num_slices, only write when current_bit reaches 0
    ) {
  int items_per_block = items_per_thread * BLOCK_THREADS;
  int tidx = threadIdx.x;
  IndexType block_idx = getLinearBlockId<IndexType>();
  IndexType slice_idx = block_idx / blocks_per_slice;
  IndexType blk_idx_in_slice = block_idx % blocks_per_slice;
  if (slice_idx >= num_slices) {
    return;
  }
  Bitwise desired = desires[slice_idx];
  IndexType k_to_find = ks_to_find[slice_idx];
  IndexType slice_start_index = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice_idx, input);
  T* data = &input.data[slice_start_index];
  typedef hipcub::BlockScan<IndexType, BLOCK_THREADS> BlockScan;
  // The three members are never live at the same time, so they share storage.
  union __align__(16) TempStorage {
    uint32_t digit_counters[RADIX_DIGITS];
    IndexType digit_count_cumsum[RADIX_DIGITS]; // only used if this it the last block for this slice
    typename BlockScan::TempStorage scan_storage;
  };
  __shared__ TempStorage temp_storage;
  // fill digit_counters with zeros
  if (tidx < RADIX_DIGITS) {
    temp_storage.digit_counters[tidx] = 0;
  }
  __syncthreads();
  // The last block of a slice may cover fewer than items_per_block elements.
  items_per_thread = (blk_idx_in_slice + 1 < blocks_per_slice)
      ? items_per_thread
      : at::ceil_div((int64_t)(slice_size - blk_idx_in_slice * items_per_block), (int64_t)BLOCK_THREADS);
  // collect digit counts and store in shared memorey
  for (int i = 0; i < items_per_thread; ++i) {
    // Find the start offset for this slice
    IndexType idx = blk_idx_in_slice * items_per_block + i * BLOCK_THREADS + tidx;
    if (idx < slice_size) {
      idx *= withinSliceStride;
      Bitwise val = TopKTypeConfig<T>::convert(doLdg(&data[idx]));
      // Only count values still matching the digits fixed by earlier passes.
      bool has_val = ((val & desiredMask) == (desired & desiredMask));
      Bitwise digit = at::cuda::Bitfield<Bitwise>::getBitfield(val, current_bit, RADIX_BITS);
      if (has_val) {
        atomicAdd(&temp_storage.digit_counters[digit], 1);
      }
    }
  }
  __syncthreads();
  // load digit counter to register, one digit per thread
  static_assert(RADIX_DIGITS <= BLOCK_THREADS, "this kernel requires RADIX_DIGITS <= BLOCK_THREADS");
  IndexType digit_count = 0;
  if (tidx < RADIX_DIGITS) {
    digit_count = temp_storage.digit_counters[tidx];
  }
  // if blocks_per_slice == 1, there is no need to do cross-block reduction
  // in this case counts saved at registers instead of global memory
  if (blocks_per_slice > 1) {
    if (tidx < RADIX_DIGITS) {
      counts[block_idx * RADIX_DIGITS + tidx] = digit_count;
    }
    __threadfence(); // make sure writes are globally visible
    __syncthreads(); // make sure all writes are finished before update semaphores
  }
  // the last block of each slice accumulates counters from multiple blocks and updates desired and ks_to_find
  __shared__ bool s_is_last_block_done;
  if (tidx == 0) {
    if (blocks_per_slice == 1) {
      s_is_last_block_done = true;
    } else {
      uint32_t blocks_finished_old = atomicAdd(&semaphores[slice_idx], 1);
      s_is_last_block_done = (blocks_finished_old == blocks_per_slice - 1);
    }
  }
  __syncthreads();
  if (!s_is_last_block_done)
    return;
  // accumulates counters from multiple blocks
  if (tidx < RADIX_DIGITS && blocks_per_slice > 1) {
    digit_count = 0;
    for (int blk = 0; blk < blocks_per_slice; ++blk) {
      digit_count += counts[(slice_idx * blocks_per_slice + blk) * RADIX_DIGITS + tidx];
    }
  }
  // compute the block-wide inclusive prefix sum
  IndexType digit_count_cumsum;
  BlockScan(temp_storage.scan_storage).InclusiveSum(digit_count, digit_count_cumsum);
  __syncthreads();
  // every thread also need the perfix_sum of it's left value for comparison, so save a copy in shared mem
  if (tidx < RADIX_DIGITS) {
    temp_storage.digit_count_cumsum[tidx] = digit_count_cumsum;
  }
  __syncthreads();
  if (tidx < RADIX_DIGITS) {
    IndexType digit_count_cumsum_left = (tidx == 0) ? 0 : temp_storage.digit_count_cumsum[tidx - 1];
    // if not the last pass: update desired and ks_to_find
    // if last pass: write out the kth value
    if (digit_count_cumsum_left < k_to_find && k_to_find <= digit_count_cumsum) {
      desired = at::cuda::Bitfield<Bitwise>::setBitfield(desired, tidx, current_bit, RADIX_BITS);
      if (current_bit > 0) {
        desires[slice_idx] = desired;
        ks_to_find[slice_idx] = k_to_find - digit_count_cumsum_left;
      } else {
        kthValues[slice_idx] = TopKTypeConfig<T>::deconvert(desired);
      }
    }
  }
  // reset semaphores for the next pass
  if (tidx == 0) {
    semaphores[slice_idx] = 0;
  }
};
// Chooses how many elements each thread of radixFindKthValues should process,
// sized so the estimated resident blocks (limited by register usage) cover
// the whole input; result is clamped to [4, 64].
int get_items_per_thread(uint64_t num_slices, uint64_t slice_size) {
  // occupancy of this kernel is limited by registers per threads
  constexpr int REGS_PER_THREAD = 40; // from nsight launch statistics
  constexpr int REGS_PER_BLOCK = REGS_PER_THREAD * BLOCK_THREADS;
  hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
  int mpc = prop->multiProcessorCount;
#if defined(USE_ROCM)
  int regs_per_mp = prop->regsPerBlock;
  int max_blocks_per_mp = 32;
#else
  int regs_per_mp = prop->regsPerMultiprocessor;
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
  int max_blocks_per_mp = prop->maxBlocksPerMultiProcessor;
#else
  int max_blocks_per_mp = 32;
#endif
#endif
  int blocks_per_mp = ::min(regs_per_mp / REGS_PER_BLOCK, max_blocks_per_mp);
  int64_t items_per_thread = at::ceil_div((int64_t)(slice_size * num_slices), (int64_t)(mpc * blocks_per_mp * BLOCK_THREADS));
  items_per_thread = ::max(4, ::min((int)items_per_thread, 64)); // clamp to (4, 64)
  return items_per_thread;
}
// Host-side launcher for the multi-block top-k path: allocates scratch
// (per-slice k-th values, semaphores, remaining-k counters, per-block digit
// counts), runs radixFindKthValues once per radix pass to find each slice's
// k-th value, then runs gatherTopK with those precomputed values to compact
// the top-k elements and indices.
template <typename T, typename IndexType, int Dim>
void launch(
    at::cuda::detail::TensorInfo<T, IndexType> input,
    IndexType inputSliceSize,
    IndexType outputSliceSize, // aka `k`
    bool largest,
    IndexType numInputSlices,
    IndexType inputWithinSliceStride,
    at::cuda::detail::TensorInfo<T, IndexType> topK,
    IndexType topKWithinSliceStride,
    at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
    IndexType indicesWithinSliceStride) {
  // configure items_per_thread based on device architecture and input size
  int items_per_thread = get_items_per_thread(numInputSlices, inputSliceSize);
  int items_per_block = items_per_thread * BLOCK_THREADS;
  using Bitwise = typename TopKTypeConfig<T>::RadixType;
  int64_t blocks_per_slice = at::ceil_div((int64_t)inputSliceSize, (int64_t)items_per_block);
  int64_t num_blocks = numInputSlices * blocks_per_slice;
  // temporary storage
  auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
  auto kthValues_buffer = allocator.allocate(numInputSlices * sizeof(T));
  T* kthValues = reinterpret_cast<T*>(kthValues_buffer.get());
  TORCH_CHECK(blocks_per_slice <= std::numeric_limits<uint32_t>::max(), "blocks_per_slice larger than uint32 maximum is not supported");
  auto semaphores_buffer = allocator.allocate(numInputSlices * sizeof(uint32_t));
  uint32_t* semaphores = reinterpret_cast<uint32_t*>(semaphores_buffer.get());
  AT_CUDA_CHECK(hipMemsetAsync(semaphores, 0, numInputSlices * sizeof(uint32_t), c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
  auto ks_to_find_buffer = allocator.allocate(numInputSlices * sizeof(IndexType));
  IndexType* ks_to_find = reinterpret_cast<IndexType*>(ks_to_find_buffer.get());
  // For `largest`, select the (size - k + 1)-th smallest value instead.
  IndexType k_to_find = largest ? inputSliceSize - outputSliceSize + 1: outputSliceSize;
  hipLaunchKernelGGL(( fill<IndexType>), dim3(::min((numInputSlices + 511) / 512, (IndexType)65535)), dim3(512), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
    ks_to_find, k_to_find, numInputSlices);
  C10_HIP_KERNEL_LAUNCH_CHECK();
  auto desired_buffer = allocator.allocate(numInputSlices * sizeof(Bitwise));
  Bitwise* desired = reinterpret_cast<Bitwise*>(desired_buffer.get());
  auto counts_buffer = allocator.allocate(num_blocks * RADIX_DIGITS * sizeof(IndexType));
  IndexType* counts = reinterpret_cast<IndexType*>(counts_buffer.get());
  Bitwise desiredMask = 0;
  dim3 grid;
  TORCH_INTERNAL_ASSERT(getGridFromTiles(num_blocks, grid), "Too many slices for topk");
  dim3 block(BLOCK_THREADS);
  // iterate radix bits for multiple passes
  for (int current_bit = sizeof(T) * 8 - RADIX_BITS; current_bit >= 0; current_bit -= RADIX_BITS) {
    hipLaunchKernelGGL(( radixFindKthValues<T, IndexType, Bitwise, Dim>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        input,
        inputSliceSize,
        ks_to_find,
        numInputSlices,
        inputWithinSliceStride,
        current_bit,
        items_per_thread,
        blocks_per_slice,
        desiredMask,
        semaphores,
        desired,
        counts,
        kthValues);
    C10_HIP_KERNEL_LAUNCH_CHECK();
    // Widen the mask so the next pass only considers values matching the
    // digits already fixed.
    desiredMask = at::cuda::Bitfield<Bitwise>::setBitfield(desiredMask, RADIX_MASK, current_bit, RADIX_BITS);
  }
  // Find topk values based on kth values
  {
    dim3 grid;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk");
    int warp_size = at::cuda::warp_size();
    dim3 block(::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)warp_size) * (int64_t)warp_size, (int64_t)1024));
    hipLaunchKernelGGL(( sbtopk::gatherTopK<T, IndexType, Dim, /* WithKthValues= */true>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        input,
        inputSliceSize,
        outputSliceSize,
        largest,
        numInputSlices,
        inputWithinSliceStride,
        topK,
        topKWithinSliceStride,
        indices,
        indicesWithinSliceStride,
        kthValues);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  }
}
} // namespace mbtopk
// Decides whether the multi-block top-k path should be used.
// This heuristics is based on the experiment in https://github.com/pytorch/pytorch/pull/71081
// Note: the regimes intentionally overlap at num_slices == 400, matching the
// original disjunction exactly.
bool should_use_multiblock(int64_t num_slices, int64_t slice_size) {
  const bool few_long_slices   = num_slices <= 400 && slice_size >= 5000;
  const bool mid_medium_slices = num_slices >= 400 && num_slices < 4000 && slice_size >= 1000;
  const bool many_short_slices = num_slices >= 4000 && slice_size >= 300;
  return few_long_slices || mid_medium_slices || many_short_slices;
}
// Host-side dispatcher for topk. Chooses, in order:
//   * scalar type (AT_DISPATCH_ALL_TYPES_AND2 incl. Half/BFloat16)  -> RUN_T
//   * 32- vs 64-bit index math                                      -> RUN_T call site
//   * collapsed-rank specialization 1/2/3/-1                        -> RUN_DIM
//   * single-block (sbtopk) vs multi-block (mbtopk) implementation  -> RUN_MB
// The public signature is unchanged.
void launch_gather_topk_kernel(
const TensorBase& self, int64_t k, int64_t dim, bool largest,
const TensorBase& values, const TensorBase& indices) {
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
auto input = self.contiguous();
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, LAUNCH_FUNCTION_NAME) \
LAUNCH_FUNCTION_NAME<scalar_t, INDEX_T, DIM>( \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
largest, \
static_cast<INDEX_T>(numInputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim]));
// Pick the single- vs multi-block implementation from the slice geometry.
#define RUN_MB(INDEX_T, DIM) \
if (should_use_multiblock(numInputSlices, sliceSize)) { \
RUN_K(INDEX_T, DIM, mbtopk::launch); \
} else { \
RUN_K(INDEX_T, DIM, sbtopk::launch); \
}
// Specialize the kernels on the collapsed rank (1, 2, 3, or -1 = generic).
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_MB(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_MB(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_MB(INDEX_T, 3); \
} else { \
RUN_MB(INDEX_T, -1); \
}
// Build TensorInfo views, collapse every dimension except `dim`, count the
// independent slices, then dispatch on collapsed rank.
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* stash the stride of dim because it can be accidentally collapsed */ \
auto strideTopK = topKInfo.strides[dim]; \
auto strideIndices = indicesInfo.strides[dim]; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
/* restore stride in case it was collapsed */ \
topKInfo.strides[collapseTopKDim] = strideTopK; \
indicesInfo.strides[collapseIndicesDim] = strideIndices; \
int64_t numInputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
numInputSlices *= inputInfo.sizes[i]; \
} \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_MB /* fix: RUN_MB previously leaked past this function */
#undef RUN_K
}
} // at::native
} // at
| 180070e61e47fd6445296163cd25966478d40cb7.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/cuda/TensorTopK.h>
#include <ATen/core/TensorBase.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/ScanUtils.cuh>
#include <ATen/cuda/AsmUtils.cuh>
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/cuda/cub.cuh>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace sbtopk { // single_block_topk
// Plus functor used as the combining operation for the block-wide
// exclusive binary prefix scans inside gatherTopK.
template <typename T>
struct AddOp {
  __device__ __forceinline__ T operator()(T const &lhs, T const &rhs) {
    return lhs + rhs;
  }
};
// Single-block topk: one thread block per slice. If WithKthValues is true the
// k-th value for every slice was precomputed (multi-block path) and is read
// from `kthValues`; otherwise it is found here with radixSelect. The block
// then writes out (1) every element strictly beyond the k-th value and
// (2) enough copies of the k-th value itself to fill the remaining k slots.
template <typename T, typename IndexType, int Dim, bool WithKthValues>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
bool largest,
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride,
T* kthValues) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of IndexType
#if defined(USE_ROCM)
__shared__ int smem[64];
#else
__shared__ int smem[32]; // one per each warp, up to warp limit
#endif
// One block per slice; excess blocks (from grid tiling) exit early.
IndexType slice = getLinearBlockId<IndexType>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
IndexType sliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
IndexType topKSliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
IndexType indicesSliceStartIndex =
at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
T* inputSliceStart = &input.data[sliceStartIndex];
T* topKSliceStart = &topK.data[topKSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
T topKValue;
if (WithKthValues){
topKValue = kthValues[slice];
} else {
topKValue = static_cast<T>(0);
// smem doubles as scratch for radixSelect and the scans below.
radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType>(
inputSliceStart, outputSliceSize, largest,
inputSliceSize, inputWithinSliceStride,
smem, &topKValue);
}
// Comparisons are done in the radix (order-preserving integer) domain.
const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x);
IndexType writeIndexStart = 0;
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK;
if (largest) {
hasTopK = inRange && (convertedV > topKConverted);
} else {
hasTopK = inRange && (convertedV < topKConverted);
}
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
IndexType topKRemaining = (outputSliceSize - writeIndexStart);
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK = inRange && (convertedV == topKConverted);
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
// Whole block exits together once the remaining slots are filled;
// carry is uniform across the block, so this break is non-divergent.
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
};
// Launch the single-block gatherTopK kernel: one block per slice, block size
// equal to the slice length rounded up to a whole warp, capped at the
// 1024-thread limit. WithKthValues=false, so the kernel finds each slice's
// k-th value itself with radixSelect (hence the trailing nullptr).
template <typename T, typename IndexType, int Dim>
void launch(
at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
bool largest,
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
  dim3 grid;
  TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk");
  const int64_t warp_sz = at::cuda::warp_size();
  const int64_t rounded_slice = at::ceil_div((int64_t)inputSliceSize, warp_sz) * warp_sz;
  dim3 block(std::min(rounded_slice, (int64_t)1024));
  gatherTopK<T, IndexType, Dim, /* WithKthValues= */false><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
      input,
      inputSliceSize,
      outputSliceSize,
      largest,
      numInputSlices,
      inputWithinSliceStride,
      topK,
      topKWithinSliceStride,
      indices,
      indicesWithinSliceStride,
      nullptr);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} // namespace sbtopk
namespace mbtopk { // multi_block_topk
constexpr int BLOCK_THREADS = 256;
// Over what radix we are selecting values
constexpr int RADIX_BITS = 8;
constexpr int RADIX_DIGITS = 1 << RADIX_BITS; // 2 ^ RADIX_BITS
constexpr int RADIX_MASK = (RADIX_DIGITS - 1);
static_assert(RADIX_DIGITS <= BLOCK_THREADS, "radixFindKthValues kernel requires RADIX_DIGITS <= BLOCK_THREADS");
// Grid-stride fill: write `value` into every element of x[0..size).
template <typename T, typename IndexType>
__global__ void fill(T* x, T value, IndexType size) {
  const IndexType stride = gridDim.x * blockDim.x;
  for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
    x[i] = value;
  }
}
// find the kth smallest value,
// for largest topk, k_to_find = slice_size - k + 1
// One radix pass of the cooperative multi-block k-th-value search. Each group
// of `blocks_per_slice` consecutive blocks handles one slice: every block
// counts, per radix digit, the elements whose already-decided high bits match
// the `desired` prefix (under `desiredMask`); the last block to finish for a
// slice (tracked via `semaphores`) sums the per-block counts, scans them, and
// narrows `desired`/`ks_to_find` by another RADIX_BITS — or, on the final
// pass (current_bit == 0), writes the fully determined k-th value.
template <typename T, typename IndexType, typename Bitwise, int Dim>
C10_LAUNCH_BOUNDS_1(BLOCK_THREADS)
__global__ void radixFindKthValues(
at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType slice_size,
IndexType* ks_to_find, // size: num_slices
IndexType num_slices,
IndexType withinSliceStride,
int current_bit,
int items_per_thread,
IndexType blocks_per_slice,
Bitwise desiredMask,
// outputs
uint32_t* semaphores, // size: num_slices
Bitwise* desires, // size: num_slices
IndexType* counts, // size: num_slices * blocks_per_slice * radix_digits
T* kthValues // size: num_slices, only write when current_bit reaches 0
) {
int items_per_block = items_per_thread * BLOCK_THREADS;
int tidx = threadIdx.x;
IndexType block_idx = getLinearBlockId<IndexType>();
IndexType slice_idx = block_idx / blocks_per_slice;
IndexType blk_idx_in_slice = block_idx % blocks_per_slice;
if (slice_idx >= num_slices) {
return;
}
// NOTE(review): `desires` starts uninitialized; this is safe because
// desiredMask is 0 on the first pass, so stale low bits never take part in
// the prefix comparison and are overwritten by later setBitfield passes.
Bitwise desired = desires[slice_idx];
IndexType k_to_find = ks_to_find[slice_idx];
IndexType slice_start_index = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice_idx, input);
T* data = &input.data[slice_start_index];
typedef cub::BlockScan<IndexType, BLOCK_THREADS> BlockScan;
// Shared scratch; the three members are used in non-overlapping phases,
// separated by __syncthreads().
union __align__(16) TempStorage {
uint32_t digit_counters[RADIX_DIGITS];
IndexType digit_count_cumsum[RADIX_DIGITS]; // only used if this it the last block for this slice
typename BlockScan::TempStorage scan_storage;
};
__shared__ TempStorage temp_storage;
// fill digit_counters with zeros
if (tidx < RADIX_DIGITS) {
temp_storage.digit_counters[tidx] = 0;
}
__syncthreads();
// The last block of a slice may cover a partial tile.
items_per_thread = (blk_idx_in_slice + 1 < blocks_per_slice)
? items_per_thread
: at::ceil_div((int64_t)(slice_size - blk_idx_in_slice * items_per_block), (int64_t)BLOCK_THREADS);
// collect digit counts and store in shared memorey
for (int i = 0; i < items_per_thread; ++i) {
// Find the start offset for this slice
IndexType idx = blk_idx_in_slice * items_per_block + i * BLOCK_THREADS + tidx;
if (idx < slice_size) {
idx *= withinSliceStride;
Bitwise val = TopKTypeConfig<T>::convert(doLdg(&data[idx]));
bool has_val = ((val & desiredMask) == (desired & desiredMask));
Bitwise digit = at::cuda::Bitfield<Bitwise>::getBitfield(val, current_bit, RADIX_BITS);
if (has_val) {
atomicAdd(&temp_storage.digit_counters[digit], 1);
}
}
}
__syncthreads();
// load digit counter to register, one digit per thread
static_assert(RADIX_DIGITS <= BLOCK_THREADS, "this kernel requires RADIX_DIGITS <= BLOCK_THREADS");
IndexType digit_count = 0;
if (tidx < RADIX_DIGITS) {
digit_count = temp_storage.digit_counters[tidx];
}
// if blocks_per_slice == 1, there is no need to do cross-block reduction
// in this case counts saved at registers instead of global memory
if (blocks_per_slice > 1) {
if (tidx < RADIX_DIGITS) {
counts[block_idx * RADIX_DIGITS + tidx] = digit_count;
}
__threadfence(); // make sure writes are globally visible
__syncthreads(); // make sure all writes are finished before update semaphores
}
// the last block of each slice accumulates counters from multiple blocks and updates desired and ks_to_find
__shared__ bool s_is_last_block_done;
if (tidx == 0) {
if (blocks_per_slice == 1) {
s_is_last_block_done = true;
} else {
uint32_t blocks_finished_old = atomicAdd(&semaphores[slice_idx], 1);
s_is_last_block_done = (blocks_finished_old == blocks_per_slice - 1);
}
}
__syncthreads();
// Only the last-arriving block for this slice performs the reduction below.
if (!s_is_last_block_done)
return;
// accumulates counters from multiple blocks
if (tidx < RADIX_DIGITS && blocks_per_slice > 1) {
digit_count = 0;
for (int blk = 0; blk < blocks_per_slice; ++blk) {
digit_count += counts[(slice_idx * blocks_per_slice + blk) * RADIX_DIGITS + tidx];
}
}
// compute the block-wide inclusive prefix sum
IndexType digit_count_cumsum;
BlockScan(temp_storage.scan_storage).InclusiveSum(digit_count, digit_count_cumsum);
__syncthreads();
// every thread also need the perfix_sum of it's left value for comparison, so save a copy in shared mem
if (tidx < RADIX_DIGITS) {
temp_storage.digit_count_cumsum[tidx] = digit_count_cumsum;
}
__syncthreads();
if (tidx < RADIX_DIGITS) {
IndexType digit_count_cumsum_left = (tidx == 0) ? 0 : temp_storage.digit_count_cumsum[tidx - 1];
// if not the last pass: update desired and ks_to_find
// if last pass: write out the kth value
// Exactly one digit (thread) satisfies this interval test per slice.
if (digit_count_cumsum_left < k_to_find && k_to_find <= digit_count_cumsum) {
desired = at::cuda::Bitfield<Bitwise>::setBitfield(desired, tidx, current_bit, RADIX_BITS);
if (current_bit > 0) {
desires[slice_idx] = desired;
ks_to_find[slice_idx] = k_to_find - digit_count_cumsum_left;
} else {
kthValues[slice_idx] = TopKTypeConfig<T>::deconvert(desired);
}
}
}
// reset semaphores for the next pass
if (tidx == 0) {
semaphores[slice_idx] = 0;
}
};
// Choose the per-thread workload for radixFindKthValues from how many of its
// register-limited blocks fit on the device, clamped to [4, 64].
int get_items_per_thread(uint64_t num_slices, uint64_t slice_size) {
  // occupancy of this kernel is limited by registers per threads
  constexpr int REGS_PER_THREAD = 40; // from nsight launch statistics
  constexpr int REGS_PER_BLOCK = REGS_PER_THREAD * BLOCK_THREADS;
  cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
  int sm_count = prop->multiProcessorCount;
#if defined(USE_ROCM)
  int regs_per_mp = prop->regsPerBlock;
  int max_blocks_per_mp = 32;
#else
  int regs_per_mp = prop->regsPerMultiprocessor;
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000
  int max_blocks_per_mp = prop->maxBlocksPerMultiProcessor;
#else
  int max_blocks_per_mp = 32;
#endif
#endif
  int resident_blocks = std::min(regs_per_mp / REGS_PER_BLOCK, max_blocks_per_mp);
  int64_t per_thread = at::ceil_div(
      (int64_t)(slice_size * num_slices),
      (int64_t)(sm_count * resident_blocks * BLOCK_THREADS));
  return std::max(4, std::min((int)per_thread, 64)); // clamp to (4, 64)
}
// Multi-block topk. Phase 1: repeatedly launch radixFindKthValues, narrowing
// RADIX_BITS per pass from MSB to LSB, to determine each slice's k-th value
// cooperatively across blocks_per_slice blocks. Phase 2: reuse
// sbtopk::gatherTopK with WithKthValues=true to compact the topk values and
// indices using those precomputed k-th values.
template <typename T, typename IndexType, int Dim>
void launch(
at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
bool largest,
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
// configure items_per_thread based on device architecture and input size
int items_per_thread = get_items_per_thread(numInputSlices, inputSliceSize);
int items_per_block = items_per_thread * BLOCK_THREADS;
using Bitwise = typename TopKTypeConfig<T>::RadixType;
int64_t blocks_per_slice = at::ceil_div((int64_t)inputSliceSize, (int64_t)items_per_block);
int64_t num_blocks = numInputSlices * blocks_per_slice;
// temporary storage
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// Per-slice k-th value, produced by the last radix pass.
auto kthValues_buffer = allocator.allocate(numInputSlices * sizeof(T));
T* kthValues = reinterpret_cast<T*>(kthValues_buffer.get());
TORCH_CHECK(blocks_per_slice <= std::numeric_limits<uint32_t>::max(), "blocks_per_slice larger than uint32 maximum is not supported");
// Semaphores let the last-arriving block per slice do the cross-block
// reduction; they must start at zero.
auto semaphores_buffer = allocator.allocate(numInputSlices * sizeof(uint32_t));
uint32_t* semaphores = reinterpret_cast<uint32_t*>(semaphores_buffer.get());
AT_CUDA_CHECK(cudaMemsetAsync(semaphores, 0, numInputSlices * sizeof(uint32_t), c10::cuda::getCurrentCUDAStream()));
auto ks_to_find_buffer = allocator.allocate(numInputSlices * sizeof(IndexType));
IndexType* ks_to_find = reinterpret_cast<IndexType*>(ks_to_find_buffer.get());
// For `largest` we search for the (n - k + 1)-th smallest value instead.
IndexType k_to_find = largest ? inputSliceSize - outputSliceSize + 1: outputSliceSize;
fill<IndexType><<<std::min((numInputSlices + 511) / 512, (IndexType)65535), 512, 0, c10::cuda::getCurrentCUDAStream()>>>(
ks_to_find, k_to_find, numInputSlices);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// `desired` (the bit-prefix found so far) is left uninitialized on purpose:
// desiredMask is 0 on the first pass, so no stale bits are ever compared.
auto desired_buffer = allocator.allocate(numInputSlices * sizeof(Bitwise));
Bitwise* desired = reinterpret_cast<Bitwise*>(desired_buffer.get());
auto counts_buffer = allocator.allocate(num_blocks * RADIX_DIGITS * sizeof(IndexType));
IndexType* counts = reinterpret_cast<IndexType*>(counts_buffer.get());
Bitwise desiredMask = 0;
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(num_blocks, grid), "Too many slices for topk");
dim3 block(BLOCK_THREADS);
// iterate radix bits for multiple passes
for (int current_bit = sizeof(T) * 8 - RADIX_BITS; current_bit >= 0; current_bit -= RADIX_BITS) {
radixFindKthValues<T, IndexType, Bitwise, Dim><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
input,
inputSliceSize,
ks_to_find,
numInputSlices,
inputWithinSliceStride,
current_bit,
items_per_thread,
blocks_per_slice,
desiredMask,
semaphores,
desired,
counts,
kthValues);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Widen the mask so the next pass also matches the digit just decided.
desiredMask = at::cuda::Bitfield<Bitwise>::setBitfield(desiredMask, RADIX_MASK, current_bit, RADIX_BITS);
}
// Find topk values based on kth values
{
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk");
int warp_size = at::cuda::warp_size();
dim3 block(std::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)warp_size) * (int64_t)warp_size, (int64_t)1024));
sbtopk::gatherTopK<T, IndexType, Dim, /* WithKthValues= */true><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
input,
inputSliceSize,
outputSliceSize,
largest,
numInputSlices,
inputWithinSliceStride,
topK,
topKWithinSliceStride,
indices,
indicesWithinSliceStride,
kthValues);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
} // namespace mbtopk
// Heuristic thresholds measured in https://github.com/pytorch/pytorch/pull/71081:
// the minimum profitable slice length drops as the number of slices grows.
bool should_use_multiblock(int64_t num_slices, int64_t slice_size) {
  int64_t min_profitable_size;
  if (num_slices < 400) {
    min_profitable_size = 5000;
  } else if (num_slices < 4000) {
    min_profitable_size = 1000;
  } else {
    min_profitable_size = 300;
  }
  return slice_size >= min_profitable_size;
}
// Host-side dispatcher for topk. Chooses, in order:
//   * scalar type (AT_DISPATCH_ALL_TYPES_AND2 incl. Half/BFloat16)  -> RUN_T
//   * 32- vs 64-bit index math                                      -> RUN_T call site
//   * collapsed-rank specialization 1/2/3/-1                        -> RUN_DIM
//   * single-block (sbtopk) vs multi-block (mbtopk) implementation  -> RUN_MB
// The public signature is unchanged.
void launch_gather_topk_kernel(
const TensorBase& self, int64_t k, int64_t dim, bool largest,
const TensorBase& values, const TensorBase& indices) {
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
auto input = self.contiguous();
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, LAUNCH_FUNCTION_NAME) \
LAUNCH_FUNCTION_NAME<scalar_t, INDEX_T, DIM>( \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
largest, \
static_cast<INDEX_T>(numInputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim]));
// Pick the single- vs multi-block implementation from the slice geometry.
#define RUN_MB(INDEX_T, DIM) \
if (should_use_multiblock(numInputSlices, sliceSize)) { \
RUN_K(INDEX_T, DIM, mbtopk::launch); \
} else { \
RUN_K(INDEX_T, DIM, sbtopk::launch); \
}
// Specialize the kernels on the collapsed rank (1, 2, 3, or -1 = generic).
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_MB(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_MB(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_MB(INDEX_T, 3); \
} else { \
RUN_MB(INDEX_T, -1); \
}
// Build TensorInfo views, collapse every dimension except `dim`, count the
// independent slices, then dispatch on collapsed rank.
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* stash the stride of dim because it can be accidentally collapsed */ \
auto strideTopK = topKInfo.strides[dim]; \
auto strideIndices = indicesInfo.strides[dim]; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
/* restore stride in case it was collapsed */ \
topKInfo.strides[collapseTopKDim] = strideTopK; \
indicesInfo.strides[collapseIndicesDim] = strideIndices; \
int64_t numInputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
numInputSlices *= inputInfo.sizes[i]; \
} \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_MB /* fix: RUN_MB previously leaked past this function */
#undef RUN_K
}
} // at::native
} // at
|
216af52319a672767f42de76d6552057afefec1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE_1 16
#define BLOCK_SIZE_2 512
__constant__ float HIST_CDF[HISTOGRAM_LENGTH];
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Restrict x to the closed interval [start, end] (float min/max semantics,
// identical to min(max(x, start), end) on float arguments).
__device__ float clamp(float x, float start, float end){
  return fminf(fmaxf(x, start), end);
}
// Histogram-equalization transfer function: rescale the CDF value of this
// intensity into [0, 255], anchoring the smallest CDF value at 0.
// Reads the CDF from constant memory (HIST_CDF).
__device__ float correct_color(unsigned char val,float cdfmin){
  float scaled = 255*(HIST_CDF[val] - cdfmin)/(1.0-cdfmin);
  return clamp(scaled, 0.0, 255.0);
}
// Convert a normalized float volume (values in [0,1]) to 8-bit values in
// [0,255]. One thread per element of the z_size x y_size x x_size volume.
__global__ void float2uchar(float* input, unsigned char* output, int z_size, int y_size, int x_size){
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  if (z >= z_size || y >= y_size || x >= x_size) {
    return;
  }
  int idx = (z * y_size + y) * x_size + x;
  output[idx] = (unsigned char)(255 * input[idx]);
}
// Luminance conversion: gray = 0.21 R + 0.71 G + 0.07 B.
// `input` is interleaved RGB (3 bytes per pixel); `output` is one byte/pixel.
__global__ void RGB2Gray(unsigned char* input, unsigned char *output, int height, int width){
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < height && col < width) {
    int pixel = row * width + col;
    unsigned char r = input[3 * pixel];
    unsigned char g = input[3 * pixel + 1];
    unsigned char b = input[3 * pixel + 2];
    output[pixel] = (unsigned char)(0.21*r + 0.71*g + 0.07*b);
  }
}
// Accumulate a 256-bin histogram of `buffer` with one atomicAdd per element.
// Grid-stride loop, so any launch configuration covers the whole input.
// NOTE(review): bins are accumulated, not assigned — the caller must zero
// `histo` before launching.
__global__ void getHistgram(unsigned char *buffer,
long size, unsigned int *histo){
  const int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += stride) {
    atomicAdd(&(histo[buffer[i]]), 1);
  }
}
// Single-block Brent-Kung inclusive scan over the histogram, normalized by
// the total pixel count (width_height) to produce the CDF in `output`.
// Launched with HISTOGRAM_LENGTH/2 threads; each thread loads two bins.
// NOTE(review): the tree phases are bounded by HISTOGRAM_LENGTH rather than
// `len`, so `len` must equal HISTOGRAM_LENGTH (it does at the call site).
__global__ void scan(unsigned int *input, float *output, int len, int width_height){
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from the host
__shared__ unsigned int T[HISTOGRAM_LENGTH];
int stride = 1;
int tx = threadIdx.x, bx = blockIdx.x;
int i = 2*bx*blockDim.x + tx;
//Load input into the shared memory
//Step 1
if(i < len){
T[tx] = input[i];
}
else{
T[tx] = 0;
}
if(i + blockDim.x < len){
T[tx + blockDim.x] = input[i + blockDim.x];
}
else{
T[tx + blockDim.x] = 0;
}
//printf("T[%d] = %d\n", tx, T[tx]);
//printf("T[%d] = %d\n", tx + blockDim.x, T[tx + blockDim.x]);
// Up-sweep (reduction) phase: build partial sums up the binary tree.
while(stride < HISTOGRAM_LENGTH){
__syncthreads();
int index = (tx + 1) * stride * 2 - 1;
if(index < HISTOGRAM_LENGTH && index - stride >= 0){
T[index] += T[index - stride];
}
stride = stride * 2;
}
// Down-sweep phase: distribute the partial sums back down the tree.
stride = HISTOGRAM_LENGTH / 4;
while(stride > 0){
__syncthreads();
int index = (tx + 1)*stride*2 - 1;
if((index + stride) < HISTOGRAM_LENGTH)
T[index + stride] += T[index];
stride = stride / 2;
}
__syncthreads();
// Normalize each inclusive prefix sum by the pixel count -> CDF value.
float assigned_value;
if(i < len){
assigned_value = T[tx] / (width_height*1.0);
//printf("assigned_value = %f\n", assigned_value);
output[i] = assigned_value;
}
if(i + blockDim.x < len){
assigned_value = T[tx + blockDim.x] / (width_height*1.0);
//printf("assigned_value = %f\n", assigned_value);
output[i + blockDim.x] = assigned_value;
}
}
// Map every 8-bit channel value through the equalization transfer function
// (correct_color reads the CDF from constant memory HIST_CDF).
__global__ void applyEqualization(unsigned char* input, unsigned char* output, int z_size, int y_size, int x_size, float cdfmin){
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  if (z < z_size && y < y_size && x < x_size) {
    int idx = (z * y_size + y) * x_size + x;
    output[idx] = (unsigned char)(correct_color(input[idx], cdfmin));
  }
}
// Convert 8-bit values back to normalized floats in [0, 1].
__global__ void uchar2float(unsigned char* input, float* output, int z_size, int y_size, int x_size){
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  if (z < z_size && y < y_size && x < x_size) {
    int idx = (z * y_size + y) * x_size + x;
    output[idx] = (float)(input[idx] / 255.0);
  }
}
//@@ insert code here
// Histogram-equalization pipeline driver:
//   1. float [0,1] image -> uchar   2. RGB -> grayscale   3. histogram
//   4. scan histogram -> CDF        5. apply equalization 6. uchar -> float
// Fixes vs. the original: the histogram bins are now zeroed before
// accumulation (hipMalloc leaves device memory uninitialized, so getHistgram's
// atomicAdds started from garbage), and the unused `maxcdf` accumulator —
// which was wrongly computed with min() anyway — has been removed.
int main(int argc, char **argv) {
  wbArg_t args;
  int imageWidth;
  int imageHeight;
  int imageChannels;
  long width_height;   // pixels per plane (width * height)
  long total_elements; // width * height * channels
  wbImage_t inputImage;
  wbImage_t outputImage;
  float *hostInputImageData;
  float *hostOutputImageData;
  float *deviceFloatInputImageData;
  float *deviceFloatOutputImageData;
  unsigned int *deviceHist;
  float *deviceHistCdf;
  float *hostHistCdf;
  float mincdf = 1.1; // CDF values are <= 1.0, so this exceeds any real entry
  unsigned char *deviceGrayscaleData;
  unsigned char *deviceUcharInputImageData;
  unsigned char *deviceEqualizedData;
  const char *inputImageFile;

  args = wbArg_read(argc, argv); /* parse the input arguments */
  inputImageFile = wbArg_getInputFile(args, 0);

  wbTime_start(Generic, "Importing data and creating memory on host");
  inputImage = wbImport(inputImageFile);
  hostInputImageData = wbImage_getData(inputImage);
  imageWidth = wbImage_getWidth(inputImage);
  imageHeight = wbImage_getHeight(inputImage);
  imageChannels = wbImage_getChannels(inputImage);
  width_height = imageWidth * imageHeight;
  total_elements = imageWidth * imageHeight * imageChannels;
  wbLog(TRACE, "inputWidth=", imageWidth);
  wbLog(TRACE, "imageHeight=", imageHeight);
  wbLog(TRACE, "imageChannels=", imageChannels);
  outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
  hostOutputImageData = wbImage_getData(outputImage);
  hostHistCdf = (float *)malloc(HISTOGRAM_LENGTH * sizeof(float));
  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbTime_start(GPU, "Doing GPU memory allocation");
  wbCheck(hipMalloc((void **)&deviceFloatInputImageData, total_elements * sizeof(float)));
  wbCheck(hipMalloc((void **)&deviceFloatOutputImageData, total_elements * sizeof(float)));
  wbCheck(hipMalloc((void **)&deviceUcharInputImageData, total_elements * sizeof(unsigned char)));
  wbCheck(hipMalloc((void **)&deviceGrayscaleData, width_height * sizeof(unsigned char)));
  wbCheck(hipMalloc((void **)&deviceHist, HISTOGRAM_LENGTH * sizeof(unsigned int)));
  wbCheck(hipMalloc((void **)&deviceHistCdf, HISTOGRAM_LENGTH * sizeof(float)));
  wbCheck(hipMalloc((void **)&deviceEqualizedData, total_elements * sizeof(unsigned char)));
  // BUG FIX: getHistgram accumulates with atomicAdd, so the bins must start
  // at zero; hipMalloc does not zero device memory.
  wbCheck(hipMemset(deviceHist, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)));
  wbTime_stop(GPU, "Doing GPU memory allocation");

  wbTime_start(Copy, "Copying data to the GPU");
  wbCheck(hipMemcpy(deviceFloatInputImageData, hostInputImageData,
                    total_elements * sizeof(float), hipMemcpyHostToDevice));
  wbTime_stop(Copy, "Copying data to the GPU");

  // Step 1: normalized float image -> 8-bit values (block x-dim spans the 3
  // channels, y/x dims tile the image plane).
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock1(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
  dim3 DimGrid1(1, ceil(imageWidth * 1.0 / BLOCK_SIZE_1), ceil(imageHeight * 1.0 / BLOCK_SIZE_1));
  hipLaunchKernelGGL(float2uchar, DimGrid1, DimBlock1, 0, 0,
                     deviceFloatInputImageData, deviceUcharInputImageData,
                     imageHeight, imageWidth, imageChannels);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  // Step 2: interleaved RGB -> single-channel grayscale.
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock2(BLOCK_SIZE_1, BLOCK_SIZE_1, 1);
  dim3 DimGrid2(ceil(imageWidth * 1.0 / BLOCK_SIZE_1), ceil(imageHeight * 1.0 / BLOCK_SIZE_1), 1);
  hipLaunchKernelGGL(RGB2Gray, DimGrid2, DimBlock2, 0, 0,
                     deviceUcharInputImageData, deviceGrayscaleData,
                     imageHeight, imageWidth);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  // Step 3: 256-bin histogram of the grayscale image.
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock3(BLOCK_SIZE_2, 1, 1);
  dim3 DimGrid3(ceil(imageHeight * imageWidth * 1.0 / BLOCK_SIZE_2), 1, 1);
  hipLaunchKernelGGL(getHistgram, DimGrid3, DimBlock3, 0, 0,
                     deviceGrayscaleData, width_height, deviceHist);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  // Step 4: single-block scan turns the histogram into a CDF.
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock4(HISTOGRAM_LENGTH / 2, 1, 1);
  dim3 DimGrid4(1, 1, 1);
  hipLaunchKernelGGL(scan, DimGrid4, DimBlock4, 0, 0,
                     deviceHist, deviceHistCdf, HISTOGRAM_LENGTH, width_height);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  // The equalization formula needs the smallest CDF entry; the CDF itself is
  // stashed in constant memory for fast broadcast reads in the kernel.
  wbCheck(hipMemcpy(hostHistCdf, deviceHistCdf, HISTOGRAM_LENGTH * sizeof(float), hipMemcpyDeviceToHost));
  for (int i = 0; i < HISTOGRAM_LENGTH; i++) {
    mincdf = min(mincdf, hostHistCdf[i]);
  }
  wbCheck(hipMemcpyToSymbol(HIST_CDF, hostHistCdf, HISTOGRAM_LENGTH * sizeof(float)));

  // Step 5: apply the equalization transfer function to every channel value.
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock5(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
  dim3 DimGrid5(1, ceil(imageWidth * 1.0 / BLOCK_SIZE_1), ceil(imageHeight * 1.0 / BLOCK_SIZE_1));
  hipLaunchKernelGGL(applyEqualization, DimGrid5, DimBlock5, 0, 0,
                     deviceUcharInputImageData, deviceEqualizedData,
                     imageHeight, imageWidth, imageChannels, mincdf);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  // Step 6: back to normalized floats for the output image.
  wbTime_start(Compute, "Doing the computation on the GPU");
  dim3 DimBlock6(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
  dim3 DimGrid6(1, ceil(imageWidth * 1.0 / BLOCK_SIZE_1), ceil(imageHeight * 1.0 / BLOCK_SIZE_1));
  hipLaunchKernelGGL(uchar2float, DimGrid6, DimBlock6, 0, 0,
                     deviceEqualizedData, deviceFloatOutputImageData,
                     imageHeight, imageWidth, imageChannels);
  wbTime_stop(Compute, "Doing the computation on the GPU");

  wbTime_start(Copy, "Copying output memory to the CPU");
  wbCheck(hipMemcpy(hostOutputImageData, deviceFloatOutputImageData,
                    total_elements * sizeof(float), hipMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbSolution(args, outputImage);

  wbTime_start(GPU, "Freeing GPU Memory");
  hipFree(deviceFloatInputImageData);
  hipFree(deviceFloatOutputImageData);
  hipFree(deviceUcharInputImageData);
  hipFree(deviceGrayscaleData);
  hipFree(deviceHist);
  hipFree(deviceHistCdf);
  hipFree(deviceEqualizedData);
  wbTime_stop(GPU, "Freeing GPU Memory");
  free(hostHistCdf);
  return 0;
}
| 216af52319a672767f42de76d6552057afefec1e.cu | #include <wb.h>
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE_1 16
#define BLOCK_SIZE_2 512
__constant__ float HIST_CDF[HISTOGRAM_LENGTH];
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__device__ float clamp(float x, float start, float end){
return min(max(x, start), end);
}
__device__ float correct_color(unsigned char val,float cdfmin){
return clamp(255*(HIST_CDF[val] - cdfmin)/(1.0-cdfmin),0.0,255.0);
}
__global__ void float2uchar(float* input, unsigned char* output, int z_size, int y_size, int x_size){
int tx = threadIdx.x, ty = threadIdx.y, tz = threadIdx.z;
int bx = blockIdx.x, by = blockIdx.y, bz = blockIdx.z;
int i = bz * blockDim.z + tz;
int j = by * blockDim.y + ty;
int k = bx * blockDim.x + tx;
float temp;
unsigned char assigned_val;
if(i < z_size && j < y_size && k < x_size){
temp = input[i*y_size*x_size + j*x_size + k];
assigned_val = (unsigned char)(255 * temp);
output[i*y_size*x_size + j*x_size + k] = assigned_val;
}
}
__global__ void RGB2Gray(unsigned char* input, unsigned char *output, int height, int width){
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int i = by * blockDim.y + ty;
int j = bx * blockDim.x + tx;
int idx;
unsigned char r,g,b;
unsigned char assigned_value;
if(i < height && j < width){
idx = i * width + j;
r = input[3*idx];
g = input[3*idx + 1];
b = input[3*idx + 2];
assigned_value = (unsigned char)(0.21*r + 0.71*g + 0.07*b);
output[idx] = (unsigned char)(assigned_value);
}
}
__global__ void getHistgram(unsigned char *buffer,
long size, unsigned int *histo){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size){
atomicAdd( &(histo[buffer[i]]), 1);
i += stride;
}
}
__global__ void scan(unsigned int *input, float *output, int len, int width_height){
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from the host
__shared__ unsigned int T[HISTOGRAM_LENGTH];
int stride = 1;
int tx = threadIdx.x, bx = blockIdx.x;
int i = 2*bx*blockDim.x + tx;
//Load input into the shared memory
//Step 1
if(i < len){
T[tx] = input[i];
}
else{
T[tx] = 0;
}
if(i + blockDim.x < len){
T[tx + blockDim.x] = input[i + blockDim.x];
}
else{
T[tx + blockDim.x] = 0;
}
//printf("T[%d] = %d\n", tx, T[tx]);
//printf("T[%d] = %d\n", tx + blockDim.x, T[tx + blockDim.x]);
while(stride < HISTOGRAM_LENGTH){
__syncthreads();
int index = (tx + 1) * stride * 2 - 1;
if(index < HISTOGRAM_LENGTH && index - stride >= 0){
T[index] += T[index - stride];
}
stride = stride * 2;
}
stride = HISTOGRAM_LENGTH / 4;
while(stride > 0){
__syncthreads();
int index = (tx + 1)*stride*2 - 1;
if((index + stride) < HISTOGRAM_LENGTH)
T[index + stride] += T[index];
stride = stride / 2;
}
__syncthreads();
float assigned_value;
if(i < len){
assigned_value = T[tx] / (width_height*1.0);
//printf("assigned_value = %f\n", assigned_value);
output[i] = assigned_value;
}
if(i + blockDim.x < len){
assigned_value = T[tx + blockDim.x] / (width_height*1.0);
//printf("assigned_value = %f\n", assigned_value);
output[i + blockDim.x] = assigned_value;
}
}
__global__ void applyEqualization(unsigned char* input, unsigned char* output, int z_size, int y_size, int x_size, float cdfmin){
int tx = threadIdx.x, ty = threadIdx.y, tz = threadIdx.z;
int bx = blockIdx.x, by = blockIdx.y, bz = blockIdx.z;
int i = bz * blockDim.z + tz;
int j = by * blockDim.y + ty;
int k = bx * blockDim.x + tx;
unsigned char temp;
unsigned char assigned_value;
if(i < z_size && j < y_size && k < x_size){
temp = input[i*y_size*x_size + j*x_size + k];
assigned_value = correct_color(temp, cdfmin);
output[i*y_size*x_size + j*x_size + k] = (unsigned char)(assigned_value);
}
}
__global__ void uchar2float(unsigned char* input, float* output, int z_size, int y_size, int x_size){
int tx = threadIdx.x, ty = threadIdx.y, tz = threadIdx.z;
int bx = blockIdx.x, by = blockIdx.y, bz = blockIdx.z;
int i = bz * blockDim.z + tz;
int j = by * blockDim.y + ty;
int k = bx * blockDim.x + tx;
unsigned char temp;
float assigned_value;
if(i < z_size && j < y_size && k < x_size){
temp = input[i*y_size*x_size + j*x_size + k];
assigned_value = (float)(temp/255.0);
//printf("temp = %d\n", temp);
output[i*y_size*x_size + j*x_size + k] = assigned_value;
}
}
//@@ insert code here
int main(int argc, char **argv) {
wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
long width_height;
long total_elements;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceFloatInputImageData;
float *deviceFloatOutputImageData;
unsigned int *deviceHist;
float *deviceHistCdf;
float *hostHistCdf;
float mincdf = 1.1, maxcdf = -1.0;
unsigned char *deviceGrayscaleData;
unsigned char *deviceUcharInputImageData;
unsigned char *deviceEqualizedData;
const char *inputImageFile;
//@@ Insert more code here
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
hostInputImageData = wbImage_getData(inputImage);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
width_height = imageWidth * imageHeight;
total_elements = imageWidth * imageHeight * imageChannels;
wbLog(TRACE, "inputWidth=", imageWidth);
wbLog(TRACE, "imageHeight=", imageHeight);
wbLog(TRACE, "imageChannels=", imageChannels);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostOutputImageData = wbImage_getData(outputImage);
hostHistCdf = (float*)malloc(HISTOGRAM_LENGTH*sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
//@@ insert code here
wbTime_start(GPU, "Doing GPU memory allocation");
wbCheck(cudaMalloc((void**)&deviceFloatInputImageData, total_elements*sizeof(float));
);
wbCheck(cudaMalloc((void**)&deviceFloatOutputImageData, total_elements*sizeof(float));
);
wbCheck(cudaMalloc((void**)&deviceUcharInputImageData, total_elements*sizeof(unsigned char));
);
wbCheck(cudaMalloc((void**)&deviceGrayscaleData, width_height*sizeof(unsigned char));
);
wbCheck(cudaMalloc((void**)&deviceHist, HISTOGRAM_LENGTH*sizeof(unsigned int));
);
wbCheck(cudaMalloc((void**)&deviceHistCdf, HISTOGRAM_LENGTH*sizeof(float)));
wbCheck(cudaMalloc((void**)&deviceEqualizedData, total_elements*sizeof(unsigned char)));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceFloatInputImageData, hostInputImageData,total_elements*sizeof(float),cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock1(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
dim3 DimGrid1(1, ceil(imageWidth*1.0/BLOCK_SIZE_1), ceil(imageHeight*1.0/BLOCK_SIZE_1));
float2uchar<<<DimGrid1, DimBlock1>>>(deviceFloatInputImageData, deviceUcharInputImageData, imageHeight, imageWidth, imageChannels);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock2(BLOCK_SIZE_1, BLOCK_SIZE_1, 1);
dim3 DimGrid2(ceil(imageWidth*1.0/BLOCK_SIZE_1),ceil(imageHeight*1.0/BLOCK_SIZE_1),1);
RGB2Gray<<<DimGrid2, DimBlock2>>>(deviceUcharInputImageData, deviceGrayscaleData, imageHeight, imageWidth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock3(BLOCK_SIZE_2, 1, 1);
dim3 DimGrid3(ceil(imageHeight*imageWidth*1.0/BLOCK_SIZE_2),1,1);
getHistgram<<<DimGrid3, DimBlock3>>>(deviceGrayscaleData, width_height, deviceHist);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock4(HISTOGRAM_LENGTH/2,1,1);
dim3 DimGrid4(1,1,1);
scan<<<DimGrid4, DimBlock4>>>(deviceHist, deviceHistCdf, HISTOGRAM_LENGTH, width_height);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbCheck(cudaMemcpy(hostHistCdf, deviceHistCdf, HISTOGRAM_LENGTH*sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < HISTOGRAM_LENGTH; i++){
mincdf = min(mincdf, hostHistCdf[i]);
maxcdf = min(maxcdf, hostHistCdf[i]);
}
wbCheck(cudaMemcpyToSymbol(HIST_CDF, hostHistCdf, HISTOGRAM_LENGTH*sizeof(float)));
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock5(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
dim3 DimGrid5(1, ceil(imageWidth*1.0/BLOCK_SIZE_1), ceil(imageHeight*1.0/BLOCK_SIZE_1));
applyEqualization<<<DimGrid5,DimBlock5>>>(deviceUcharInputImageData, deviceEqualizedData,imageHeight, imageWidth, imageChannels, mincdf);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimBlock6(3, BLOCK_SIZE_1, BLOCK_SIZE_1);
dim3 DimGrid6(1, ceil(imageWidth*1.0/BLOCK_SIZE_1), ceil(imageHeight*1.0/BLOCK_SIZE_1));
uchar2float<<<DimGrid6, DimBlock6>>>(deviceEqualizedData, deviceFloatOutputImageData, imageHeight, imageWidth, imageChannels);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutputImageData, deviceFloatOutputImageData, total_elements*sizeof(float),
cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
//@@ insert code here
wbSolution(args, outputImage);
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceFloatInputImageData);
cudaFree(deviceFloatOutputImageData);
cudaFree(deviceUcharInputImageData);
cudaFree(deviceGrayscaleData);
cudaFree(deviceHist);
cudaFree(deviceHistCdf);
cudaFree(deviceEqualizedData);
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostHistCdf);
return 0;
}
|
e243894ab2c7ab47165996871f41c58ddf06a5df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/book.h"
#include "common/cpu_bitmap.h"
#define INF 2e10f
#define rnd( x ) (x * rand() / RAND_MAX)
#define SPHERES 35
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel(unsigned char *ptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = (x * blockDim.x * gridDim.x) + y;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float raio = 200;
if ((ox * ox) + (oy * oy) < (raio * raio)) {
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 255;
ptr[offset*4 + 3] = 0;
} else {
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
}
int main( void ) {
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, 0) );
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMallocManaged(&dev_bitmap, bitmap.image_size()) );
dim3 grids(DIM/16, DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost) );
bitmap.display_and_exit();
hipFree(dev_bitmap);
}
| e243894ab2c7ab47165996871f41c58ddf06a5df.cu | #include "cuda.h"
#include "common/book.h"
#include "common/cpu_bitmap.h"
#define INF 2e10f
#define rnd( x ) (x * rand() / RAND_MAX)
#define SPHERES 35
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel(unsigned char *ptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = (x * blockDim.x * gridDim.x) + y;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float raio = 200;
if ((ox * ox) + (oy * oy) < (raio * raio)) {
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 255;
ptr[offset*4 + 3] = 0;
} else {
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
}
int main( void ) {
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, 0) );
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMallocManaged(&dev_bitmap, bitmap.image_size()) );
dim3 grids(DIM/16, DIM/16);
dim3 threads(16,16);
kernel<<<grids, threads>>>(dev_bitmap);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime);
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost) );
bitmap.display_and_exit();
cudaFree(dev_bitmap);
}
|
a19e7e0cc7394e7dc4c0417c0db0fe8f5f23a949.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <numeric>
#include "MemoryManager.h"
#include "BFS.h"
#include "hipcub/hipcub.hpp"
#ifdef __INTELLISENSE__
unsigned int atomicCAS(unsigned int* address, unsigned int compare, unsigned int val);
unsigned int atomicAdd(unsigned int* address, unsigned int val);
void __syncthreads();
#endif
namespace faimGraphBFS
{
const vertex_t NOT_VISITIED = std::numeric_limits<vertex_t>::max();
struct dFrontierQueue
{
public:
unsigned int *size;
vertex_t *nodes;
dFrontierQueue(unsigned int capacity)
{
hipMalloc((void**)&nodes, sizeof(vertex_t) * capacity);
hipMalloc((void**)&size, sizeof(unsigned int));
hipMemset(size, 0, sizeof(unsigned int));
}
void Free()
{
if (nodes)
hipFree(nodes);
if (size)
hipFree(size);
nodes = nullptr;
size = nullptr;
}
__device__
void Reset()
{
*size = 0;
}
__device__
unsigned int Allocate(unsigned int n_nodes)
{
return atomicAdd(size, n_nodes);
}
};
template <typename VertexDataType, typename EdgeDataType>
__global__ void d_BFSIteration(MemoryManager *memory_manager, memory_t *memory, int page_size, int *found_new_nodes, vertex_t *frontier, int iteration)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < memory_manager->number_vertices && frontier[tid] == iteration)
{
VertexDataType *vertices = (VertexDataType*)memory;
AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, vertices[tid].mem_index, page_size, memory_manager->start_index));
for (int i = 0; i < vertices[tid].neighbours; i++)
{
vertex_t next_node = adjacency_iterator.getDestination();
if (atomicCAS(frontier + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
{
*found_new_nodes = 1;
}
adjacency_iterator.advanceIterator(i, memory_manager->edges_per_page, memory, page_size, memory_manager->start_index);
}
}
}
template <typename VertexDataType, typename EdgeDataType, size_t THREADS_PER_BLOCK>
__global__ void d_bfsBasic(MemoryManager* memory_manager, memory_t* memory, int page_size, int *found_new_nodes, vertex_t *frontier, vertex_t start_node)
{
frontier[start_node] = 0;
int iteration = 0;
do
{
*found_new_nodes = 0;
d_BFSIteration <VertexDataType, EdgeDataType> << <(memory_manager->number_vertices + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, found_new_nodes, frontier, iteration);
iteration++;
hipDeviceSynchronize();
} while (*found_new_nodes);
}
// Explores the given node with a single thread
template <typename VertexDataType, typename EdgeDataType>
__device__ void d_ExploreEdges(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
vertex_t *thread_frontier, int &n_frontier_nodes, vertex_t node, int iteration)
{
VertexDataType *vertices = (VertexDataType*)memory;
VertexDataType ¤t_node = vertices[node];
AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
auto n_edges = current_node.neighbours;
for (int i = 0; i < n_edges; i++)
{
vertex_t next_node = adjacency_iterator.getDestination();
if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
{
thread_frontier[n_frontier_nodes++] = next_node;
}
adjacency_iterator.advanceIterator(i, memory_manager->edges_per_page, memory, page_size, memory_manager->start_index);
}
}
// Explores the adjacency of the given node with all calling threads in parallel. n_threads should equal the number of calling threads
template<typename VertexDataType, typename EdgeDataType>
__device__ void d_ExploreEdges(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
vertex_t *thread_frontier, int &n_frontier_nodes, vertex_t node, vertex_t start, vertex_t n_threads, int iteration)
{
VertexDataType *vertices = (VertexDataType*)memory;
VertexDataType ¤t_node = vertices[node];
AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
auto n_edges = current_node.neighbours;
auto edges_per_page = memory_manager->edges_per_page;
vertex_t page_index = 0;
auto edge = start;
while (edge < n_edges)
{
auto target_page_index = edge / edges_per_page;
auto page_offset = edge % edges_per_page; // Offset on target page
// Do page traversals until target page
for (; page_index < target_page_index; page_index++)
adjacency_iterator.blockTraversalAbsolute(edges_per_page, memory, page_size, memory_manager->start_index);
auto const next_node = adjacency_iterator.getDestinationAt(page_offset);
if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
{
thread_frontier[n_frontier_nodes] = next_node;
n_frontier_nodes++;
}
edge += n_threads;
}
}
// Fills a frontier queue based on individual thread frontiers
// Has to be called by the entire block
template<size_t THREADS_PER_BLOCK>
__device__ void d_FillFrontierQueue(dFrontierQueue &queue, vertex_t *thread_frontier, int n_frontier_nodes)
{
typedef hipcub::BlockScan<unsigned int, THREADS_PER_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
// Get per-thread offset in queue
unsigned int thread_offset;
BlockScan(temp_storage).ExclusiveSum(n_frontier_nodes, thread_offset);
__syncthreads();
// Get per-block offset in queue. Last thread knows total size, so it does the allocation
__shared__ unsigned int block_offset;
if (threadIdx.x == THREADS_PER_BLOCK - 1)
{
unsigned int total_size = thread_offset + n_frontier_nodes;
if (total_size == 0)
block_offset = std::numeric_limits<unsigned int>::max();
else
block_offset = queue.Allocate(total_size);
}
__syncthreads();
// If we didn't discover any new nodes we don't have anything to copy to the queue
if (block_offset == std::numeric_limits<unsigned int>::max())
return;
// Lastly, copy all discovered nodes to the queue on a per-thread basis
thread_offset += block_offset;
for (int i = 0; i < n_frontier_nodes; i++)
queue.nodes[thread_offset + i] = thread_frontier[i];
}
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_ExploreEdges_kernel(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
vertex_t node, dFrontierQueue newFrontierQueue, int iteration)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int n_threads = blockDim.x * gridDim.x;
vertex_t thread_frontier[EDGES_PER_THREAD];
int n_frontier_nodes = 0;
d_ExploreEdges<VertexDataType, EdgeDataType>(memory_manager, memory, page_size, frontiers,
thread_frontier, n_frontier_nodes, node, tid, n_threads, iteration);
__syncthreads();
d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontierQueue, thread_frontier, n_frontier_nodes);
}
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_bfsDynamicParalellismIteration(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newFrontierQueue, dFrontierQueue oldFrontierQueue, int iteration)
{
int const edges_per_block = THREADS_PER_BLOCK * EDGES_PER_THREAD;
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
vertex_t thread_frontier[EDGES_PER_THREAD];
int n_frontier_nodes = 0;
VertexDataType *vertices = (VertexDataType*)memory;
if (id < *oldFrontierQueue.size)
{
auto const current_node = oldFrontierQueue.nodes[id];
vertex_t n_edges = vertices[current_node].neighbours;
AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, vertices[current_node].mem_index, page_size, memory_manager->start_index));
if (n_edges <= EDGES_PER_THREAD)
{
d_ExploreEdges<VertexDataType, EdgeDataType>(memory_manager, memory, page_size, frontiers,
thread_frontier, n_frontier_nodes, current_node, iteration);
}
else
{
//printf("Using sub-kernel with %d blocks for %d edges\n", (n_edges + edges_per_block - 1) / edges_per_block, n_edges);
d_ExploreEdges_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK> << <(n_edges + edges_per_block - 1) / edges_per_block, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, current_node, newFrontierQueue, iteration);
}
}
__syncthreads();
d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontierQueue, thread_frontier, n_frontier_nodes);
}
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_bfsDynamicParalellism(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newFrontierQueue, dFrontierQueue oldFrontierQueue, vertex_t start_node)
{
frontiers[start_node] = 0;
newFrontierQueue.Allocate(1);
newFrontierQueue.nodes[0] = start_node;
int iteration = 0;
do
{
auto temp = oldFrontierQueue;
oldFrontierQueue = newFrontierQueue;
newFrontierQueue = temp;
newFrontierQueue.Reset();
d_bfsDynamicParalellismIteration<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <(*oldFrontierQueue.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, newFrontierQueue, oldFrontierQueue, iteration);
iteration++;
hipDeviceSynchronize();
} while (*newFrontierQueue.size > 0);
}
template<typename VertexDataType, size_t THREADS_PER_BLOCK, size_t NODES_PER_THREAD, size_t EDGES_PER_THREAD>
__global__ void d_ClassifyNodes_kernel(MemoryManager *memory_manager, memory_t *memory, dFrontierQueue rawFrontier,
dFrontierQueue smallNodesFrontier, dFrontierQueue mediumNodesFrontier, dFrontierQueue largeNodesFrontier)
{
unsigned int const tid = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = blockDim.x * gridDim.x;
VertexDataType *vertices = (VertexDataType*)memory;
vertex_t smallThreadFrontier[NODES_PER_THREAD];
vertex_t mediumThreadFrontier[NODES_PER_THREAD];
vertex_t largeThreadFrontier[NODES_PER_THREAD];
int n_small = 0;
int n_medium = 0;
int n_large = 0;
for (unsigned int i = tid, end = *rawFrontier.size; i < end; i += stride)
{
auto const node = rawFrontier.nodes[i];
auto const n_edges = vertices[node].neighbours;
if (n_edges <= EDGES_PER_THREAD)
{
//printf("Classifying node %u with %u edges as small\n", node, n_edges);
smallThreadFrontier[n_small++] = node;
}
else if (n_edges <= EDGES_PER_THREAD * WARPSIZE)
{
//printf("Classifying node %u with %u edges as medium\n", node, n_edges);
mediumThreadFrontier[n_medium++] = node;
}
else
{
//printf("Classifying node %u with %u edges as large\n", node, n_edges);
largeThreadFrontier[n_large++] = node;
}
}
__syncthreads();
d_FillFrontierQueue<THREADS_PER_BLOCK>(smallNodesFrontier, smallThreadFrontier, n_small);
d_FillFrontierQueue<THREADS_PER_BLOCK>(mediumNodesFrontier, mediumThreadFrontier, n_medium);
d_FillFrontierQueue<THREADS_PER_BLOCK>(largeNodesFrontier, largeThreadFrontier, n_large);
}
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_ExploreFrontier_kernel(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue frontier, dFrontierQueue newFrontier, vertex_t threads_per_node, unsigned int iteration)
{
unsigned int const tid = threadIdx.x + blockIdx.x * blockDim.x;
vertex_t offset = tid % threads_per_node;
vertex_t thread_frontier[EDGES_PER_THREAD];
int n_frontier_nodes = 0;
unsigned int node_index = tid / threads_per_node;
if (node_index < *frontier.size)
{
auto const node = frontier.nodes[node_index];
//printf("Thread %u exploring node %u\n", tid, node);
d_ExploreEdges<VertexDataType, EdgeDataType>
(memory_manager, memory, page_size, frontiers, thread_frontier, n_frontier_nodes, node, offset, threads_per_node, iteration);
}
__syncthreads();
d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontier, thread_frontier, n_frontier_nodes);
}
// Gets the size of the largest node in the queue and stores it in current_max_node_size
template <typename VertexDataType, size_t THREADS_PER_BLOCK, size_t ITEMS_PER_THREAD>
__global__
void d_GetMaxNodeSize_kernel(dFrontierQueue queue, memory_t *memory, vertex_t *current_max_node_size)
{
using BlockReduce = hipcub::BlockReduce<unsigned int, THREADS_PER_BLOCK>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int tid = threadIdx.x;
int start = tid * ITEMS_PER_THREAD;
VertexDataType *vertices = (VertexDataType*)memory;
vertex_t node_sizes[ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; i++)
{
vertex_t node_index = start + i;
if (node_index > *queue.size)
{
// Don't forget to set unused entries to 0
for (; i < ITEMS_PER_THREAD; i++)
node_sizes[i] = 0;
break;
}
vertex_t node = queue.nodes[node_index];
node_sizes[i] = vertices[node].neighbours;
}
vertex_t max_size = BlockReduce(temp_storage).Reduce(node_sizes, hipcub::Max());
if (tid == 0)
{
*current_max_node_size = max_size;
}
}
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_BFSPreprocessing(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue rawFrontier, dFrontierQueue smallNodesFrontier, dFrontierQueue mediumNodesFrontier,
dFrontierQueue largeNodesFrontier, dFrontierQueue hugeNodesFrontier, vertex_t *current_max_node_size, vertex_t start_node)
{
size_t const EDGES_PER_BLOCK = THREADS_PER_BLOCK * EDGES_PER_THREAD;
frontiers[start_node] = 0;
rawFrontier.Allocate(1);
rawFrontier.nodes[0] = start_node;
unsigned int iteration = 0;
while (*rawFrontier.size > 0)
{
smallNodesFrontier.Reset();
mediumNodesFrontier.Reset();
largeNodesFrontier.Reset();
//printf("Iteration %u, %u nodes\n", iteration, *rawFrontier.size);
d_ClassifyNodes_kernel<VertexDataType, THREADS_PER_BLOCK, EDGES_PER_THREAD, EDGES_PER_THREAD>
<< <(*rawFrontier.size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, rawFrontier, smallNodesFrontier, mediumNodesFrontier, largeNodesFrontier);
hipDeviceSynchronize();
//printf("Queue sizes: %u, %u, %u\n", *smallNodesFrontier.size, *mediumNodesFrontier.size, *largeNodesFrontier.size);
rawFrontier.Reset();
if (*hugeNodesFrontier.size > 0)
{
if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 1> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 4)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 4> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 16)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 16> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 64)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 64> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 128> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
hipDeviceSynchronize();
vertex_t n_blocks = (*current_max_node_size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK;
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <n_blocks * *hugeNodesFrontier.size, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, hugeNodesFrontier, rawFrontier, n_blocks * THREADS_PER_BLOCK, iteration);
}
if (*smallNodesFrontier.size > 0)
{
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <(*smallNodesFrontier.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, smallNodesFrontier, rawFrontier, 1, iteration);
}
if (*mediumNodesFrontier.size > 0)
{
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <((*mediumNodesFrontier.size * WARPSIZE) + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, mediumNodesFrontier, rawFrontier, WARPSIZE, iteration);
}
if (*largeNodesFrontier.size > 0)
{
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <*largeNodesFrontier.size, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, largeNodesFrontier, rawFrontier, THREADS_PER_BLOCK, iteration);
}
hipDeviceSynchronize();
iteration++;
}
}
// Explores the adjacency of the given node with all calling threads in parallel.
// n_threads should equal the number of calling threads; each thread starts at edge
// index `start` and strides by n_threads. Newly discovered neighbours are claimed
// via atomicCAS on `frontiers` (depth array, NOT_VISITIED sentinel) and classified
// by degree into the caller's per-thread small/medium/large/huge frontiers.
// The found*Node flags are block-shared; concurrent writers only ever store `true`,
// so the race is benign.
// FIX: restored `&current_node` — the original read `¤t_node`, an HTML-entity
// mangling of "&current_node" that does not compile. Also renamed the inner edge
// count to avoid shadowing the outer `n_edges`.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__device__ void d_ExploreEdgesClassification(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
    vertex_t *smallThreadFrontier, int &n_small, bool &foundSmallNode,
    vertex_t *mediumThreadFrontier, int &n_medium, bool &foundMediumNode,
    vertex_t *largeThreadFrontier, int &n_large, bool &foundLargeNode,
    vertex_t *hugeThreadFrontier, int &n_huge, bool &foundHugeNode,
    vertex_t node, vertex_t start, vertex_t n_threads, int iteration)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    VertexDataType &current_node = vertices[node];
    AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
    auto n_edges = current_node.neighbours;
    auto edges_per_page = memory_manager->edges_per_page;
    vertex_t page_index = 0;
    auto edge = start;
    while (edge < n_edges)
    {
        auto target_page_index = edge / edges_per_page;
        auto page_offset = edge % edges_per_page; // Offset on target page
        // Do page traversals until target page
        for (; page_index < target_page_index; page_index++)
            adjacency_iterator.blockTraversalAbsolute(edges_per_page, memory, page_size, memory_manager->start_index);
        auto const next_node = adjacency_iterator.getDestinationAt(page_offset);
        if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
        {
            // This thread won the claim on next_node: classify it by its degree.
            vertex_t const next_n_edges = vertices[next_node].neighbours;
            if (next_n_edges <= EDGES_PER_THREAD)
            {
                smallThreadFrontier[n_small++] = next_node;
                foundSmallNode = true;
            }
            else if (next_n_edges <= EDGES_PER_THREAD * 32)
            {
                mediumThreadFrontier[n_medium++] = next_node;
                foundMediumNode = true;
            }
            else if (next_n_edges <= EDGES_PER_THREAD * THREADS_PER_BLOCK)
            {
                largeThreadFrontier[n_large++] = next_node;
                foundLargeNode = true;
            }
            else
            {
                hugeThreadFrontier[n_huge++] = next_node;
                foundHugeNode = true;
            }
        }
        edge += n_threads;
    }
}
// // Checks a single edge for its discovered status and classifies it if necessary
// template<size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
// __device__
// void d_CheckAndClassifyEdge(unsigned int edge, unsigned int *row_offsets, unsigned int *col_ids, unsigned int *frontiers, unsigned int iteration,
// unsigned int smallThreadFrontier[], int &n_small, bool &foundSmallNode,
// unsigned int mediumThreadFrontier[], int &n_medium, bool &foundMediumNode,
// unsigned int largeThreadFrontier[], int &n_large, bool &foundLargeNode,
// unsigned int hugeThreadFrontier[], int &n_huge, bool &foundHugeNode)
// {
// unsigned int next_node = col_ids[edge];
// if (atomicCAS(frontiers + next_node, NO_DEPTH, iteration + 1) == NO_DEPTH)
// {
// unsigned int const n_edges = row_offsets[next_node + 1] - row_offsets[next_node];
// if (n_edges <= EDGES_PER_THREAD)
// {
// //printf("Classifying node %u with %u edges as small\n", next_node, n_edges);
// smallThreadFrontier[n_small++] = next_node;
// foundSmallNode = true;
// }
// else if (n_edges <= EDGES_PER_THREAD * 32)
// {
// //printf("Classifying node %u with %u edges as medium\n", next_node, n_edges);
// mediumThreadFrontier[n_medium++] = next_node;
// foundMediumNode = true;
// }
// else if (n_edges <= EDGES_PER_BLOCK)
// {
// //printf("Classifying node %u with %u edges as large\n", next_node, n_edges);
// largeThreadFrontier[n_large++] = next_node;
// foundLargeNode = true;
// }
// else
// {
// hugeThreadFrontier[n_huge++] = next_node;
// foundHugeNode = true;
// }
// }
// }
// threads_per_node should divide the number of threads evenly
// Explores every node of `frontier` with `threads_per_node` cooperating threads each,
// and classifies newly discovered neighbours into the four new*NodesFrontier queues.
// Grid layout: (*frontier.size * threads_per_node) total threads, THREADS_PER_BLOCK per block.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_ExploreFrontierClassification_kernel(dFrontierQueue frontier, MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
unsigned int threads_per_node, unsigned int iteration)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Which edge-stripe of its node this thread handles.
unsigned int offset = tid % threads_per_node;
// Per-thread scratch for discovered nodes, one array per size class.
vertex_t smallThreadFrontier[EDGES_PER_THREAD];
vertex_t mediumThreadFrontier[EDGES_PER_THREAD];
vertex_t largeThreadFrontier[EDGES_PER_THREAD];
vertex_t hugeThreadFrontier[EDGES_PER_THREAD];
int n_small = 0;
int n_medium = 0;
int n_large = 0;
int n_huge = 0;
// Block-shared "anything found in this class?" flags; written racily but only with `true`.
__shared__ bool foundSmallNodes;
__shared__ bool foundMediumNodes;
__shared__ bool foundLargeNodes;
__shared__ bool foundHugeNodes;
if (threadIdx.x == 0)
{
foundSmallNodes = false;
foundMediumNodes = false;
foundLargeNodes = false;
foundHugeNodes = false;
}
__syncthreads();
unsigned int node_index = tid / threads_per_node;
if (node_index < *frontier.size)
{
vertex_t node = frontier.nodes[node_index];
d_ExploreEdgesClassification<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>(memory_manager, memory, page_size, frontiers,
smallThreadFrontier, n_small, foundSmallNodes,
mediumThreadFrontier, n_medium, foundMediumNodes,
largeThreadFrontier, n_large, foundLargeNodes,
hugeThreadFrontier, n_huge, foundHugeNodes,
node, offset, threads_per_node, iteration);
}
// d_FillFrontierQueue is a block-wide collective (CUB scan + __syncthreads),
// so all threads of the block must reach it together — hence the barrier and
// the flag-guard outside any per-thread divergence.
__syncthreads();
if (foundSmallNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newSmallNodesFrontier, smallThreadFrontier, n_small);
if (foundMediumNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newMediumNodesFrontier, mediumThreadFrontier, n_medium);
if (foundLargeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newLargeNodesFrontier, largeThreadFrontier, n_large);
if (foundHugeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newHugeNodesFrontier, hugeThreadFrontier, n_huge);
}
// Explores all nodes in the frontier by having each thread explore a bit of each node
// Unlike d_ExploreFrontierClassification_kernel, the whole grid iterates the frontier
// sequentially: every launched thread cooperates on each (huge) node in turn.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_ExploreHugeNodesFrontierClassification_kernel(dFrontierQueue frontier, MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
unsigned int threads_per_node, unsigned int iteration)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Edge-stripe offset of this thread within the current node's adjacency.
unsigned int offset = tid % threads_per_node;
vertex_t smallThreadFrontier[EDGES_PER_THREAD];
vertex_t mediumThreadFrontier[EDGES_PER_THREAD];
vertex_t largeThreadFrontier[EDGES_PER_THREAD];
vertex_t hugeThreadFrontier[EDGES_PER_THREAD];
int n_small;
int n_medium;
int n_large;
int n_huge;
__shared__ bool foundSmallNodes;
__shared__ bool foundMediumNodes;
__shared__ bool foundLargeNodes;
__shared__ bool foundHugeNodes;
// *frontier.size is uniform across threads, so all threads run the same trip count
// and the __syncthreads() inside the loop is safe.
for (unsigned int node_index = 0; node_index < *frontier.size; node_index++)
{
// Reset per-thread counters and block-shared flags for this node.
n_small = 0;
n_medium = 0;
n_large = 0;
n_huge = 0;
if (threadIdx.x == 0)
{
foundSmallNodes = false;
foundMediumNodes = false;
foundLargeNodes = false;
foundHugeNodes = false;
}
__syncthreads();
vertex_t node = frontier.nodes[node_index];
d_ExploreEdgesClassification<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>(memory_manager, memory, page_size, frontiers,
smallThreadFrontier, n_small, foundSmallNodes,
mediumThreadFrontier, n_medium, foundMediumNodes,
largeThreadFrontier, n_large, foundLargeNodes,
hugeThreadFrontier, n_huge, foundHugeNodes,
node, offset, threads_per_node, iteration);
__syncthreads();
// Block-wide collective fills; guarded by the shared flags so empty classes skip the scan.
if (foundSmallNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newSmallNodesFrontier, smallThreadFrontier, n_small);
if (foundMediumNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newMediumNodesFrontier, mediumThreadFrontier, n_medium);
if (foundLargeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newLargeNodesFrontier, largeThreadFrontier, n_large);
if (foundHugeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newHugeNodesFrontier, hugeThreadFrontier, n_huge);
}
}
// Exchanges the contents of two frontier queues (shallow member-wise swap;
// the queues only hold device pointers, so copying them is cheap).
__device__
void d_SwapQueues(dFrontierQueue &queue1, dFrontierQueue &queue2)
{
    dFrontierQueue held = queue2;
    queue2 = queue1;
    queue1 = held;
}
// Single-thread driver kernel for classification-based BFS (launched <<<1,1>>>).
// Seeds the starting node into the queue matching its degree class, then per
// iteration swaps new/old queues and launches one child kernel per non-empty
// class. Depths are written into `frontiers` (iteration + 1) by the child kernels.
// FIX: the loop-termination test previously ignored newHugeNodesFrontier, so a
// BFS level consisting only of huge nodes terminated the search early; the huge
// queue is now included in the continue condition.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_bfsClassification(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
    dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
    dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
    dFrontierQueue oldSmallNodesFrontier, dFrontierQueue oldMediumNodesFrontier,
    dFrontierQueue oldLargeNodesFrontier, dFrontierQueue oldHugeNodesFrontier,
    unsigned int *current_max_node_size, vertex_t starting_node)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    size_t const EDGES_PER_BLOCK = EDGES_PER_THREAD * THREADS_PER_BLOCK;
    // Classify the start vertex by degree and seed the matching queue.
    unsigned int n_edges = vertices[starting_node].neighbours;
    if (n_edges <= EDGES_PER_THREAD)
    {
        newSmallNodesFrontier.Allocate(1);
        newSmallNodesFrontier.nodes[0] = starting_node;
    }
    else if (n_edges <= EDGES_PER_THREAD * 32)
    {
        newMediumNodesFrontier.Allocate(1);
        newMediumNodesFrontier.nodes[0] = starting_node;
    }
    else if (n_edges <= EDGES_PER_BLOCK)
    {
        newLargeNodesFrontier.Allocate(1);
        newLargeNodesFrontier.nodes[0] = starting_node;
    }
    else
    {
        newHugeNodesFrontier.Allocate(1);
        newHugeNodesFrontier.nodes[0] = starting_node;
    }
    unsigned int iteration = 0;
    do
    {
        // Rotate queues: last iteration's "new" queues become this iteration's inputs.
        d_SwapQueues(newSmallNodesFrontier, oldSmallNodesFrontier);
        newSmallNodesFrontier.Reset();
        d_SwapQueues(newMediumNodesFrontier, oldMediumNodesFrontier);
        newMediumNodesFrontier.Reset();
        d_SwapQueues(newLargeNodesFrontier, oldLargeNodesFrontier);
        newLargeNodesFrontier.Reset();
        d_SwapQueues(newHugeNodesFrontier, oldHugeNodesFrontier);
        newHugeNodesFrontier.Reset();
        if (*oldHugeNodesFrontier.size > 0)
        {
            // The huge-node kernel sizes its grid to the largest node in the queue;
            // pick the reduction granularity by queue size.
            if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 1> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 4)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 4> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 16)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 16> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 64)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 64> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 128> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            // Child kernel must finish before *current_max_node_size is read below.
            cudaDeviceSynchronize();
            unsigned int n_blocks = (*current_max_node_size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK;
            d_ExploreHugeNodesFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK> << <n_blocks, THREADS_PER_BLOCK >> > (
                oldHugeNodesFrontier, memory_manager, memory, page_size, frontiers,
                newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                n_blocks * THREADS_PER_BLOCK, iteration);
        }
        if (*oldSmallNodesFrontier.size > 0)
        {
            // Small nodes: one thread per node.
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <(*oldSmallNodesFrontier.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
                (oldSmallNodesFrontier, memory_manager, memory, page_size, frontiers,
                    newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                    1, iteration);
        }
        if (*oldMediumNodesFrontier.size > 0)
        {
            // Medium nodes: one warp (32 threads) per node.
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <((*oldMediumNodesFrontier.size * 32) + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
                (oldMediumNodesFrontier, memory_manager, memory, page_size, frontiers,
                    newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                    32, iteration);
        }
        if (*oldLargeNodesFrontier.size > 0)
        {
            // Large nodes: one block per node.
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <*oldLargeNodesFrontier.size, THREADS_PER_BLOCK >> >
                (oldLargeNodesFrontier, memory_manager, memory, page_size, frontiers,
                    newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                    THREADS_PER_BLOCK, iteration);
        }
        cudaDeviceSynchronize();
        iteration++;
        // Continue while ANY class discovered new nodes — including huge ones.
    } while (*newSmallNodesFrontier.size > 0
        || *newMediumNodesFrontier.size > 0
        || *newLargeNodesFrontier.size > 0
        || *newHugeNodesFrontier.size > 0);
}
}
// Runs the basic (level-synchronous, one-thread-per-vertex) BFS variant on the GPU.
// Returns the per-vertex depth array (NOT_VISITIED for unreached vertices) and
// accumulates allocation / kernel / copy timings into `timing`.
// FIX: the result vector was only reserve()d before hipMemcpy wrote through
// &result[0] — undefined behaviour on an empty vector, and the function returned
// a size-0 vector. resize() gives the copy valid storage and the correct size.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    hipEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    int *dev_found_new_nodes;
    vertex_t *dev_frontier;
    start_clock(start_allocation, end_allocation);
    hipMalloc((void**)&dev_found_new_nodes, sizeof(int));
    hipMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    // NOT_VISITIED is all-ones, so the byte-wise memset fill pattern is still correct.
    hipMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    // Single-thread driver kernel; it launches the per-iteration child kernels itself.
    hipLaunchKernelGGL(( faimGraphBFS::d_bfsBasic <VertexDataType, EdgeDataType, 256>) , dim3(1), dim3(1), 0, 0,
        (MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_found_new_nodes, dev_frontier, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    result.resize(memory_manager->number_vertices);
    hipMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, hipMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    hipFree(dev_found_new_nodes);
    hipFree(dev_frontier);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Runs the dynamic-parallelism BFS variant: a driver kernel spawns child kernels
// for high-degree vertices. Temporarily raises the device pending-launch limit
// and restores it afterwards. Returns the per-vertex depth array.
// FIX: result was reserve()d, not resize()d — hipMemcpy wrote into unowned
// storage and the returned vector had size 0.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    hipEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    size_t launch_limit;
    // Save the current limit so it can be restored after the run.
    hipDeviceGetLimit(&launch_limit, hipLimitDevRuntimePendingLaunchCount);
    hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 32768);
    vertex_t *dev_frontier;
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue newFrontierQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldFrontierQueue(memory_manager->number_vertices);
    hipMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    hipMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_bfsDynamicParalellism <VertexDataType, EdgeDataType, 64, 256> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
            newFrontierQueue, oldFrontierQueue, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    result.resize(memory_manager->number_vertices);
    hipMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, hipMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    // Queues are non-RAII by design (passed by value to kernels) — free explicitly.
    newFrontierQueue.Free();
    oldFrontierQueue.Free();
    hipFree(dev_frontier);
    hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, launch_limit);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Runs the preprocessing BFS variant (raw frontier split into size-classified
// queues up front). Returns the per-vertex depth array.
// FIXES: (1) restored `&current_max_node_size` — the original read
// `¤t_max_node_size`, an HTML-entity mangling of "&current" that does not
// compile; (2) result was reserve()d, not resize()d, so the copy wrote into
// unowned storage and the function returned an empty vector.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    hipEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    vertex_t *dev_frontier;
    unsigned int *current_max_node_size; // Used for huge frontier handling
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue rawFrontierQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue smallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue mediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue largeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue hugeNodesQueue(memory_manager->number_vertices);
    hipMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    hipMalloc((void**)&current_max_node_size, sizeof(unsigned int));
    hipMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_BFSPreprocessing<VertexDataType, EdgeDataType, 4, 128> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
            rawFrontierQueue, smallNodesQueue, mediumNodesQueue, largeNodesQueue, hugeNodesQueue, current_max_node_size, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    result.resize(memory_manager->number_vertices);
    hipMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, hipMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    rawFrontierQueue.Free();
    smallNodesQueue.Free();
    mediumNodesQueue.Free();
    largeNodesQueue.Free();
    hugeNodesQueue.Free();
    hipFree(dev_frontier);
    hipFree(current_max_node_size);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Runs the classification BFS variant (per-iteration small/medium/large/huge
// queues with size-appropriate child kernels). Returns the per-vertex depth array.
// FIXES: (1) restored `&current_max_node_size` (was mojibake `¤t_max_node_size`
// from an HTML-entity mangling of "&current"); (2) result was reserve()d, not
// resize()d, so hipMemcpy wrote into unowned storage and a size-0 vector was
// returned.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    hipEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    vertex_t *dev_frontier;
    unsigned int *current_max_node_size; // Used for huge frontier handling
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue newSmallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newMediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newLargeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newHugeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldSmallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldMediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldLargeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldHugeNodesQueue(memory_manager->number_vertices);
    hipMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    hipMalloc((void**)&current_max_node_size, sizeof(unsigned int));
    hipMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_bfsClassification<VertexDataType, EdgeDataType, 16, 256> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
            newSmallNodesQueue, newMediumNodesQueue, newLargeNodesQueue, newHugeNodesQueue,
            oldSmallNodesQueue, oldMediumNodesQueue, oldLargeNodesQueue, oldHugeNodesQueue,
            current_max_node_size, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    result.resize(memory_manager->number_vertices);
    hipMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, hipMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    oldSmallNodesQueue.Free();
    oldMediumNodesQueue.Free();
    oldLargeNodesQueue.Free();
    oldHugeNodesQueue.Free();
    newSmallNodesQueue.Free();
    newMediumNodesQueue.Free();
    newLargeNodesQueue.Free();
    newHugeNodesQueue.Free();
    hipFree(dev_frontier);
    hipFree(current_max_node_size);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
#include <numeric>
#include "MemoryManager.h"
#include "BFS.h"
#include "cub/cub.cuh"
#ifdef __INTELLISENSE__
unsigned int atomicCAS(unsigned int* address, unsigned int compare, unsigned int val);
unsigned int atomicAdd(unsigned int* address, unsigned int val);
void __syncthreads();
#endif
namespace faimGraphBFS
{
const vertex_t NOT_VISITIED = std::numeric_limits<vertex_t>::max();
// Device-side work queue of vertex ids with an atomically bumped size counter.
// Deliberately NOT RAII: instances are copied by value into kernel arguments,
// so device memory must be released explicitly via Free() on the host.
struct dFrontierQueue
{
public:
// Device pointer to the current element count.
unsigned int *size;
// Device pointer to the vertex storage (capacity fixed at construction).
vertex_t *nodes;
// Host-side constructor: allocates device storage and zeroes the counter.
dFrontierQueue(unsigned int capacity)
{
cudaMalloc((void**)&nodes, sizeof(vertex_t) * capacity);
cudaMalloc((void**)&size, sizeof(unsigned int));
cudaMemset(size, 0, sizeof(unsigned int));
}
// Host-side release; safe to call more than once (pointers are nulled).
void Free()
{
if (nodes)
cudaFree(nodes);
if (size)
cudaFree(size);
nodes = nullptr;
size = nullptr;
}
// Device-side: empty the queue (does not clear the node storage).
__device__
void Reset()
{
*size = 0;
}
// Device-side: reserve n_nodes slots; returns the first reserved index.
__device__
unsigned int Allocate(unsigned int n_nodes)
{
return atomicAdd(size, n_nodes);
}
};
// One level-synchronous BFS step: each thread owning a vertex at depth
// `iteration` scans its full adjacency and claims unvisited neighbours at
// depth iteration + 1 via atomicCAS; *found_new_nodes is set if any claim wins.
// Grid layout: one thread per vertex of the graph.
template <typename VertexDataType, typename EdgeDataType>
__global__ void d_BFSIteration(MemoryManager *memory_manager, memory_t *memory, int page_size, int *found_new_nodes, vertex_t *frontier, int iteration)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < memory_manager->number_vertices && frontier[tid] == iteration)
{
VertexDataType *vertices = (VertexDataType*)memory;
AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, vertices[tid].mem_index, page_size, memory_manager->start_index));
for (int i = 0; i < vertices[tid].neighbours; i++)
{
vertex_t next_node = adjacency_iterator.getDestination();
if (atomicCAS(frontier + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
{
// Plain store is fine: all writers store the same value.
*found_new_nodes = 1;
}
adjacency_iterator.advanceIterator(i, memory_manager->edges_per_page, memory, page_size, memory_manager->start_index);
}
}
}
// Single-thread driver kernel (launched <<<1,1>>>) for the basic BFS:
// repeatedly launches one d_BFSIteration child grid per level until an
// iteration discovers no new vertices. Requires device-side dynamic parallelism.
template <typename VertexDataType, typename EdgeDataType, size_t THREADS_PER_BLOCK>
__global__ void d_bfsBasic(MemoryManager* memory_manager, memory_t* memory, int page_size, int *found_new_nodes, vertex_t *frontier, vertex_t start_node)
{
// Seed: the start node sits at depth 0.
frontier[start_node] = 0;
int iteration = 0;
do
{
*found_new_nodes = 0;
d_BFSIteration <VertexDataType, EdgeDataType> << <(memory_manager->number_vertices + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, found_new_nodes, frontier, iteration);
iteration++;
// Child grid must complete before *found_new_nodes is inspected.
cudaDeviceSynchronize();
} while (*found_new_nodes);
}
// Explores the given node with a single thread: walks the whole adjacency,
// claims unvisited neighbours (depth iteration + 1) via atomicCAS and appends
// each newly claimed vertex to the caller's thread-local frontier buffer.
// FIX: restored `&current_node` — the original read `¤t_node`, an
// HTML-entity mangling of "&current_node" that does not compile.
template <typename VertexDataType, typename EdgeDataType>
__device__ void d_ExploreEdges(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
    vertex_t *thread_frontier, int &n_frontier_nodes, vertex_t node, int iteration)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    VertexDataType &current_node = vertices[node];
    AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
    auto n_edges = current_node.neighbours;
    for (int i = 0; i < n_edges; i++)
    {
        vertex_t next_node = adjacency_iterator.getDestination();
        if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
        {
            thread_frontier[n_frontier_nodes++] = next_node;
        }
        adjacency_iterator.advanceIterator(i, memory_manager->edges_per_page, memory, page_size, memory_manager->start_index);
    }
}
// Explores the adjacency of the given node with all calling threads in parallel.
// n_threads should equal the number of calling threads; each thread handles
// edges start, start + n_threads, start + 2*n_threads, ... and appends claimed
// neighbours to its own thread_frontier buffer.
// FIX: restored `&current_node` — the original read `¤t_node`, an
// HTML-entity mangling of "&current_node" that does not compile.
template<typename VertexDataType, typename EdgeDataType>
__device__ void d_ExploreEdges(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
    vertex_t *thread_frontier, int &n_frontier_nodes, vertex_t node, vertex_t start, vertex_t n_threads, int iteration)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    VertexDataType &current_node = vertices[node];
    AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
    auto n_edges = current_node.neighbours;
    auto edges_per_page = memory_manager->edges_per_page;
    vertex_t page_index = 0;
    auto edge = start;
    while (edge < n_edges)
    {
        auto target_page_index = edge / edges_per_page;
        auto page_offset = edge % edges_per_page; // Offset on target page
        // Do page traversals until target page (iterator only moves forward).
        for (; page_index < target_page_index; page_index++)
            adjacency_iterator.blockTraversalAbsolute(edges_per_page, memory, page_size, memory_manager->start_index);
        auto const next_node = adjacency_iterator.getDestinationAt(page_offset);
        if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
        {
            thread_frontier[n_frontier_nodes] = next_node;
            n_frontier_nodes++;
        }
        edge += n_threads;
    }
}
// Fills a frontier queue based on individual thread frontiers
// Has to be called by the entire block
// Uses a CUB exclusive block scan so each thread gets a contiguous slot range;
// the queue's counter is bumped exactly once per block.
template<size_t THREADS_PER_BLOCK>
__device__ void d_FillFrontierQueue(dFrontierQueue &queue, vertex_t *thread_frontier, int n_frontier_nodes)
{
typedef cub::BlockScan<unsigned int, THREADS_PER_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
// Get per-thread offset in queue
unsigned int thread_offset;
BlockScan(temp_storage).ExclusiveSum(n_frontier_nodes, thread_offset);
__syncthreads();
// Get per-block offset in queue. Last thread knows total size, so it does the allocation
// (exclusive scan: last thread's offset + its own count == block total).
__shared__ unsigned int block_offset;
if (threadIdx.x == THREADS_PER_BLOCK - 1)
{
unsigned int total_size = thread_offset + n_frontier_nodes;
if (total_size == 0)
block_offset = std::numeric_limits<unsigned int>::max();  // sentinel: nothing to write
else
block_offset = queue.Allocate(total_size);
}
__syncthreads();
// If we didn't discover any new nodes we don't have anything to copy to the queue
if (block_offset == std::numeric_limits<unsigned int>::max())
return;
// Lastly, copy all discovered nodes to the queue on a per-thread basis
thread_offset += block_offset;
for (int i = 0; i < n_frontier_nodes; i++)
queue.nodes[thread_offset + i] = thread_frontier[i];
}
// Child kernel (dynamic parallelism): the whole grid cooperatively explores one
// node's adjacency, each thread taking a stride of the edge list, then flushes
// discovered vertices into newFrontierQueue via a block-wide collective.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_ExploreEdges_kernel(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
vertex_t node, dFrontierQueue newFrontierQueue, int iteration)
{
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int n_threads = blockDim.x * gridDim.x;
vertex_t thread_frontier[EDGES_PER_THREAD];
int n_frontier_nodes = 0;
d_ExploreEdges<VertexDataType, EdgeDataType>(memory_manager, memory, page_size, frontiers,
thread_frontier, n_frontier_nodes, node, tid, n_threads, iteration);
__syncthreads();
// Block-wide collective; every thread must participate even with zero finds.
d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontierQueue, thread_frontier, n_frontier_nodes);
}
// One BFS iteration using dynamic parallelism: each thread takes one node from
// oldFrontierQueue. Low-degree nodes (<= EDGES_PER_THREAD edges) are explored
// inline so discoveries fit the per-thread buffer; high-degree nodes spawn a
// child grid (d_ExploreEdges_kernel) that writes to newFrontierQueue itself.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_bfsDynamicParalellismIteration(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newFrontierQueue, dFrontierQueue oldFrontierQueue, int iteration)
{
    int const edges_per_block = THREADS_PER_BLOCK * EDGES_PER_THREAD;
    unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
    vertex_t thread_frontier[EDGES_PER_THREAD];
    int n_frontier_nodes = 0;
    VertexDataType *vertices = (VertexDataType*)memory;
    if (id < *oldFrontierQueue.size)
    {
        auto const current_node = oldFrontierQueue.nodes[id];
        vertex_t n_edges = vertices[current_node].neighbours;
        // (Removed an unused AdjacencyIterator that was constructed here but never read.)
        if (n_edges <= EDGES_PER_THREAD)
        {
            // Small node: this thread can hold every potential discovery locally
            d_ExploreEdges<VertexDataType, EdgeDataType>(memory_manager, memory, page_size, frontiers,
                thread_frontier, n_frontier_nodes, current_node, iteration);
        }
        else
        {
            // Large node: delegate to a child grid sized to cover all of its edges
            d_ExploreEdges_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK> << <(n_edges + edges_per_block - 1) / edges_per_block, THREADS_PER_BLOCK >> >
                (memory_manager, memory, page_size, frontiers, current_node, newFrontierQueue, iteration);
        }
    }
    __syncthreads();
    // Compact inline discoveries; child-grid discoveries are queued by the child itself
    d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontierQueue, thread_frontier, n_frontier_nodes);
}
// Single-thread driver kernel (intended for a <<<1,1>>> launch): runs the whole
// BFS from start_node using device-side child launches, one per frontier level.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_bfsDynamicParalellism(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newFrontierQueue, dFrontierQueue oldFrontierQueue, vertex_t start_node)
{
// Seed the search: start node gets depth 0 and forms the initial frontier
frontiers[start_node] = 0;
newFrontierQueue.Allocate(1);
newFrontierQueue.nodes[0] = start_node;
int iteration = 0;
do
{
// Swap queues: last iteration's discoveries become this iteration's input
auto temp = oldFrontierQueue;
oldFrontierQueue = newFrontierQueue;
newFrontierQueue = temp;
newFrontierQueue.Reset();
d_bfsDynamicParalellismIteration<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <(*oldFrontierQueue.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, newFrontierQueue, oldFrontierQueue, iteration);
iteration++;
// Wait for the child grid so *newFrontierQueue.size is valid below.
// NOTE(review): device-side cudaDeviceSynchronize() is deprecated in CUDA
// >= 11.6 and removed in 12.x -- confirm toolkit version used to build this.
cudaDeviceSynchronize();
} while (*newFrontierQueue.size > 0);
}
// Partitions rawFrontier into three queues by node degree:
//   small  : <= EDGES_PER_THREAD edges
//   medium : <= EDGES_PER_THREAD * WARPSIZE edges
//   large  : everything above
// Precondition: the launch grid must satisfy
// gridDim.x * blockDim.x * NODES_PER_THREAD >= *rawFrontier.size, so each
// thread visits at most NODES_PER_THREAD nodes (local buffers cannot overflow).
template<typename VertexDataType, size_t THREADS_PER_BLOCK, size_t NODES_PER_THREAD, size_t EDGES_PER_THREAD>
__global__ void d_ClassifyNodes_kernel(MemoryManager *memory_manager, memory_t *memory, dFrontierQueue rawFrontier,
dFrontierQueue smallNodesFrontier, dFrontierQueue mediumNodesFrontier, dFrontierQueue largeNodesFrontier)
{
unsigned int const tid = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = blockDim.x * gridDim.x;
VertexDataType *vertices = (VertexDataType*)memory;
// Per-thread staging buffers, flushed block-wide at the end
vertex_t smallThreadFrontier[NODES_PER_THREAD];
vertex_t mediumThreadFrontier[NODES_PER_THREAD];
vertex_t largeThreadFrontier[NODES_PER_THREAD];
int n_small = 0;
int n_medium = 0;
int n_large = 0;
// Grid-stride style loop over the raw frontier
for (unsigned int i = tid, end = *rawFrontier.size; i < end; i += stride)
{
auto const node = rawFrontier.nodes[i];
auto const n_edges = vertices[node].neighbours;
if (n_edges <= EDGES_PER_THREAD)
{
//printf("Classifying node %u with %u edges as small\n", node, n_edges);
smallThreadFrontier[n_small++] = node;
}
else if (n_edges <= EDGES_PER_THREAD * WARPSIZE)
{
//printf("Classifying node %u with %u edges as medium\n", node, n_edges);
mediumThreadFrontier[n_medium++] = node;
}
else
{
//printf("Classifying node %u with %u edges as large\n", node, n_edges);
largeThreadFrontier[n_large++] = node;
}
}
__syncthreads();
// Block-wide compaction into each classified queue (all threads participate)
d_FillFrontierQueue<THREADS_PER_BLOCK>(smallNodesFrontier, smallThreadFrontier, n_small);
d_FillFrontierQueue<THREADS_PER_BLOCK>(mediumNodesFrontier, mediumThreadFrontier, n_medium);
d_FillFrontierQueue<THREADS_PER_BLOCK>(largeNodesFrontier, largeThreadFrontier, n_large);
}
// Explores every node of a (size-classified) frontier, assigning
// threads_per_node consecutive threads to each node; within a node, thread
// 'offset' handles every threads_per_node-th edge. Discoveries are compacted
// into newFrontier block-wide.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_ExploreFrontier_kernel(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue frontier, dFrontierQueue newFrontier, vertex_t threads_per_node, unsigned int iteration)
{
unsigned int const tid = threadIdx.x + blockIdx.x * blockDim.x;
// This thread's position within its node's thread group
vertex_t offset = tid % threads_per_node;
vertex_t thread_frontier[EDGES_PER_THREAD];
int n_frontier_nodes = 0;
unsigned int node_index = tid / threads_per_node;
if (node_index < *frontier.size)
{
auto const node = frontier.nodes[node_index];
//printf("Thread %u exploring node %u\n", tid, node);
d_ExploreEdges<VertexDataType, EdgeDataType>
(memory_manager, memory, page_size, frontiers, thread_frontier, n_frontier_nodes, node, offset, threads_per_node, iteration);
}
__syncthreads();
// Block-wide compaction (must be reached by every thread in the block)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newFrontier, thread_frontier, n_frontier_nodes);
}
// Gets the size (degree) of the largest node in the queue and stores it in
// current_max_node_size. Single-block kernel: callers must choose
// ITEMS_PER_THREAD so that THREADS_PER_BLOCK * ITEMS_PER_THREAD >= *queue.size,
// otherwise entries beyond that product are not inspected.
template <typename VertexDataType, size_t THREADS_PER_BLOCK, size_t ITEMS_PER_THREAD>
__global__
void d_GetMaxNodeSize_kernel(dFrontierQueue queue, memory_t *memory, vertex_t *current_max_node_size)
{
    using BlockReduce = cub::BlockReduce<unsigned int, THREADS_PER_BLOCK>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    int tid = threadIdx.x;
    int start = tid * ITEMS_PER_THREAD;
    VertexDataType *vertices = (VertexDataType*)memory;
    vertex_t node_sizes[ITEMS_PER_THREAD];
    for (int i = 0; i < ITEMS_PER_THREAD; i++)
    {
        vertex_t node_index = start + i;
        // Valid indices are [0, *queue.size). The original test used '>' which
        // read one element past the end (queue.nodes[*queue.size]); '>=' fixes
        // that out-of-bounds access.
        if (node_index >= *queue.size)
        {
            // Don't forget to set unused entries to 0 so they can't win the max
            for (; i < ITEMS_PER_THREAD; i++)
                node_sizes[i] = 0;
            break;
        }
        vertex_t node = queue.nodes[node_index];
        node_sizes[i] = vertices[node].neighbours;
    }
    vertex_t max_size = BlockReduce(temp_storage).Reduce(node_sizes, cub::Max());
    // Only thread 0 holds the valid block-wide reduction result
    if (tid == 0)
    {
        *current_max_node_size = max_size;
    }
}
// Single-thread driver kernel (intended for a <<<1,1>>> launch): runs a full
// BFS where each iteration first classifies the raw frontier into small /
// medium / large queues, then explores each class with a launch configuration
// matched to node degree (1 thread, 1 warp, or 1+ blocks per node).
// NOTE(review): hugeNodesFrontier is read here but never Reset and never
// populated by d_ClassifyNodes_kernel in this file's visible code -- it appears
// to rely on being zero-initialized at allocation; confirm with dFrontierQueue.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__ void d_BFSPreprocessing(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue rawFrontier, dFrontierQueue smallNodesFrontier, dFrontierQueue mediumNodesFrontier,
dFrontierQueue largeNodesFrontier, dFrontierQueue hugeNodesFrontier, vertex_t *current_max_node_size, vertex_t start_node)
{
size_t const EDGES_PER_BLOCK = THREADS_PER_BLOCK * EDGES_PER_THREAD;
// Seed the search: start node gets depth 0 and forms the initial frontier
frontiers[start_node] = 0;
rawFrontier.Allocate(1);
rawFrontier.nodes[0] = start_node;
unsigned int iteration = 0;
while (*rawFrontier.size > 0)
{
smallNodesFrontier.Reset();
mediumNodesFrontier.Reset();
largeNodesFrontier.Reset();
//printf("Iteration %u, %u nodes\n", iteration, *rawFrontier.size);
// Split the current frontier into per-degree-class queues
d_ClassifyNodes_kernel<VertexDataType, THREADS_PER_BLOCK, EDGES_PER_THREAD, EDGES_PER_THREAD>
<< <(*rawFrontier.size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, rawFrontier, smallNodesFrontier, mediumNodesFrontier, largeNodesFrontier)
;
cudaDeviceSynchronize();
//printf("Queue sizes: %u, %u, %u\n", *smallNodesFrontier.size, *mediumNodesFrontier.size, *largeNodesFrontier.size);
// rawFrontier now collects the NEXT level's discoveries
rawFrontier.Reset();
if (*hugeNodesFrontier.size > 0)
{
// Pick ITEMS_PER_THREAD so one block covers the whole huge-node queue
if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 1> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 4)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 4> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 16)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 16> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else if (*hugeNodesFrontier.size <= THREADS_PER_BLOCK * 64)
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 64> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
else
d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 128> << <1, THREADS_PER_BLOCK >> > (hugeNodesFrontier, memory, current_max_node_size);
cudaDeviceSynchronize();
// Enough blocks per node to cover the largest huge node's edge list
vertex_t n_blocks = (*current_max_node_size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK;
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <n_blocks * *hugeNodesFrontier.size, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, hugeNodesFrontier, rawFrontier, n_blocks * THREADS_PER_BLOCK, iteration);
}
if (*smallNodesFrontier.size > 0)
{
// One thread per small node
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <(*smallNodesFrontier.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, smallNodesFrontier, rawFrontier, 1, iteration);
}
if (*mediumNodesFrontier.size > 0)
{
// One warp per medium node
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <((*mediumNodesFrontier.size * WARPSIZE) + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, mediumNodesFrontier, rawFrontier, WARPSIZE, iteration);
}
if (*largeNodesFrontier.size > 0)
{
// One block per large node
d_ExploreFrontier_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
<< <*largeNodesFrontier.size, THREADS_PER_BLOCK >> >
(memory_manager, memory, page_size, frontiers, largeNodesFrontier, rawFrontier, THREADS_PER_BLOCK, iteration);
}
// Wait for all child grids before reading *rawFrontier.size again
cudaDeviceSynchronize();
iteration++;
}
}
// Explores the adjacency of the given node with all calling threads in
// parallel; n_threads should equal the number of calling threads, and 'start'
// is this thread's index within that group (it handles edges start,
// start + n_threads, ...). Every newly discovered neighbour is claimed via
// atomicCAS on its frontier entry and classified by degree into one of four
// per-thread staging buffers; the matching found* flag is raised so the caller
// knows which queues need flushing.
// (Restores 'VertexDataType &current_node', which had been corrupted by a
// character-encoding error; also renames the inner degree variable to avoid
// shadowing the outer n_edges.)
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__device__ void d_ExploreEdgesClassification(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
vertex_t *smallThreadFrontier, int &n_small, bool &foundSmallNode,
vertex_t *mediumThreadFrontier, int &n_medium, bool &foundMediumNode,
vertex_t *largeThreadFrontier, int &n_large, bool &foundLargeNode,
vertex_t *hugeThreadFrontier, int &n_huge, bool &foundHugeNode,
vertex_t node, vertex_t start, vertex_t n_threads, int iteration)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    VertexDataType &current_node = vertices[node];
    AdjacencyIterator<EdgeDataType> adjacency_iterator(pageAccess<EdgeDataType>(memory, current_node.mem_index, page_size, memory_manager->start_index));
    auto n_edges = current_node.neighbours;
    auto edges_per_page = memory_manager->edges_per_page;
    vertex_t page_index = 0;
    auto edge = start;
    while (edge < n_edges)
    {
        auto target_page_index = edge / edges_per_page;
        auto page_offset = edge % edges_per_page; // Offset on target page
        // Do page traversals until target page
        for (; page_index < target_page_index; page_index++)
            adjacency_iterator.blockTraversalAbsolute(edges_per_page, memory, page_size, memory_manager->start_index);
        auto const next_node = adjacency_iterator.getDestinationAt(page_offset);
        // Atomically claim the node: only its first discoverer classifies it
        if (atomicCAS(frontiers + next_node, NOT_VISITIED, iteration + 1) == NOT_VISITIED)
        {
            vertex_t const next_n_edges = vertices[next_node].neighbours;
            if (next_n_edges <= EDGES_PER_THREAD)
            {
                //printf("Classifying node %u with %u edges as small\n", next_node, next_n_edges);
                smallThreadFrontier[n_small++] = next_node;
                foundSmallNode = true;
            }
            else if (next_n_edges <= EDGES_PER_THREAD * 32)
            {
                //printf("Classifying node %u with %u edges as medium\n", next_node, next_n_edges);
                mediumThreadFrontier[n_medium++] = next_node;
                foundMediumNode = true;
            }
            else if (next_n_edges <= EDGES_PER_THREAD * THREADS_PER_BLOCK)
            {
                //printf("Classifying node %u with %u edges as large\n", next_node, next_n_edges);
                largeThreadFrontier[n_large++] = next_node;
                foundLargeNode = true;
            }
            else
            {
                hugeThreadFrontier[n_huge++] = next_node;
                foundHugeNode = true;
            }
        }
        edge += n_threads;
    }
}
// // Checks a single edge for its discovered status and classifies it if necessary
// template<size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
// __device__
// void d_CheckAndClassifyEdge(unsigned int edge, unsigned int *row_offsets, unsigned int *col_ids, unsigned int *frontiers, unsigned int iteration,
// unsigned int smallThreadFrontier[], int &n_small, bool &foundSmallNode,
// unsigned int mediumThreadFrontier[], int &n_medium, bool &foundMediumNode,
// unsigned int largeThreadFrontier[], int &n_large, bool &foundLargeNode,
// unsigned int hugeThreadFrontier[], int &n_huge, bool &foundHugeNode)
// {
// unsigned int next_node = col_ids[edge];
// if (atomicCAS(frontiers + next_node, NO_DEPTH, iteration + 1) == NO_DEPTH)
// {
// unsigned int const n_edges = row_offsets[next_node + 1] - row_offsets[next_node];
// if (n_edges <= EDGES_PER_THREAD)
// {
// //printf("Classifying node %u with %u edges as small\n", next_node, n_edges);
// smallThreadFrontier[n_small++] = next_node;
// foundSmallNode = true;
// }
// else if (n_edges <= EDGES_PER_THREAD * 32)
// {
// //printf("Classifying node %u with %u edges as medium\n", next_node, n_edges);
// mediumThreadFrontier[n_medium++] = next_node;
// foundMediumNode = true;
// }
// else if (n_edges <= EDGES_PER_BLOCK)
// {
// //printf("Classifying node %u with %u edges as large\n", next_node, n_edges);
// largeThreadFrontier[n_large++] = next_node;
// foundLargeNode = true;
// }
// else
// {
// hugeThreadFrontier[n_huge++] = next_node;
// foundHugeNode = true;
// }
// }
// }
// threads_per_node should divide the number of threads evenly.
// Assigns threads_per_node consecutive threads per frontier node, explores and
// classifies discoveries into four per-thread buffers, then flushes each class
// to its queue. The shared found* flags are written by many threads without
// atomics; this is a benign race since every writer stores 'true'.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_ExploreFrontierClassification_kernel(dFrontierQueue frontier, MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
unsigned int threads_per_node, unsigned int iteration)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
// This thread's position within its node's thread group
unsigned int offset = tid % threads_per_node;
vertex_t smallThreadFrontier[EDGES_PER_THREAD];
vertex_t mediumThreadFrontier[EDGES_PER_THREAD];
vertex_t largeThreadFrontier[EDGES_PER_THREAD];
vertex_t hugeThreadFrontier[EDGES_PER_THREAD];
int n_small = 0;
int n_medium = 0;
int n_large = 0;
int n_huge = 0;
// Block-level flags: skip a queue flush entirely if no thread found that class
__shared__ bool foundSmallNodes;
__shared__ bool foundMediumNodes;
__shared__ bool foundLargeNodes;
__shared__ bool foundHugeNodes;
if (threadIdx.x == 0)
{
foundSmallNodes = false;
foundMediumNodes = false;
foundLargeNodes = false;
foundHugeNodes = false;
}
__syncthreads();
unsigned int node_index = tid / threads_per_node;
if (node_index < *frontier.size)
{
vertex_t node = frontier.nodes[node_index];
d_ExploreEdgesClassification<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>(memory_manager, memory, page_size, frontiers,
smallThreadFrontier, n_small, foundSmallNodes,
mediumThreadFrontier, n_medium, foundMediumNodes,
largeThreadFrontier, n_large, foundLargeNodes,
hugeThreadFrontier, n_huge, foundHugeNodes,
node, offset, threads_per_node, iteration);
}
__syncthreads();
// Flags are uniform across the block after the barrier, so these branches are
// taken (or not) by all threads together -- safe around block-wide fills.
if (foundSmallNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newSmallNodesFrontier, smallThreadFrontier, n_small);
if (foundMediumNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newMediumNodesFrontier, mediumThreadFrontier, n_medium);
if (foundLargeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newLargeNodesFrontier, largeThreadFrontier, n_large);
if (foundHugeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newHugeNodesFrontier, hugeThreadFrontier, n_huge);
}
// Explores all nodes in the frontier by having each thread explore a bit of
// each node: the whole launch processes one huge node at a time (outer loop),
// with each thread covering every threads_per_node-th edge. Per-node
// discoveries are classified and flushed before moving to the next node.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_ExploreHugeNodesFrontierClassification_kernel(dFrontierQueue frontier, MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
unsigned int threads_per_node, unsigned int iteration)
{
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int offset = tid % threads_per_node;
vertex_t smallThreadFrontier[EDGES_PER_THREAD];
vertex_t mediumThreadFrontier[EDGES_PER_THREAD];
vertex_t largeThreadFrontier[EDGES_PER_THREAD];
vertex_t hugeThreadFrontier[EDGES_PER_THREAD];
int n_small;
int n_medium;
int n_large;
int n_huge;
// Block-level flags: skip a queue flush entirely if no thread found that class
__shared__ bool foundSmallNodes;
__shared__ bool foundMediumNodes;
__shared__ bool foundLargeNodes;
__shared__ bool foundHugeNodes;
// All threads iterate the same node sequence, so the barriers below are
// reached uniformly by the whole block on every pass.
for (unsigned int node_index = 0; node_index < *frontier.size; node_index++)
{
// Reset per-node counters and flags before exploring the next node
n_small = 0;
n_medium = 0;
n_large = 0;
n_huge = 0;
if (threadIdx.x == 0)
{
foundSmallNodes = false;
foundMediumNodes = false;
foundLargeNodes = false;
foundHugeNodes = false;
}
__syncthreads();
vertex_t node = frontier.nodes[node_index];
d_ExploreEdgesClassification<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>(memory_manager, memory, page_size, frontiers,
smallThreadFrontier, n_small, foundSmallNodes,
mediumThreadFrontier, n_medium, foundMediumNodes,
largeThreadFrontier, n_large, foundLargeNodes,
hugeThreadFrontier, n_huge, foundHugeNodes,
node, offset, threads_per_node, iteration);
__syncthreads();
if (foundSmallNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newSmallNodesFrontier, smallThreadFrontier, n_small);
if (foundMediumNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newMediumNodesFrontier, mediumThreadFrontier, n_medium);
if (foundLargeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newLargeNodesFrontier, largeThreadFrontier, n_large);
if (foundHugeNodes)
d_FillFrontierQueue<THREADS_PER_BLOCK>(newHugeNodesFrontier, hugeThreadFrontier, n_huge);
}
}
// Exchanges the two frontier-queue handles (shallow swap of the descriptors;
// the underlying device buffers themselves are untouched).
__device__
void d_SwapQueues(dFrontierQueue &queue1, dFrontierQueue &queue2)
{
    dFrontierQueue held = queue1;
    queue1 = queue2;
    queue2 = held;
}
// Single-thread driver kernel (intended for a <<<1,1>>> launch): classification
// BFS. Each level keeps four queues (small/medium/large/huge by node degree);
// every iteration swaps new->old, explores each class with a degree-matched
// launch configuration, and collects next-level discoveries (pre-classified)
// into the new queues.
template<typename VertexDataType, typename EdgeDataType, size_t EDGES_PER_THREAD, size_t THREADS_PER_BLOCK>
__global__
void d_bfsClassification(MemoryManager *memory_manager, memory_t *memory, int page_size, vertex_t *frontiers,
dFrontierQueue newSmallNodesFrontier, dFrontierQueue newMediumNodesFrontier,
dFrontierQueue newLargeNodesFrontier, dFrontierQueue newHugeNodesFrontier,
dFrontierQueue oldSmallNodesFrontier, dFrontierQueue oldMediumNodesFrontier,
dFrontierQueue oldLargeNodesFrontier, dFrontierQueue oldHugeNodesFrontier,
unsigned int *current_max_node_size, vertex_t starting_node)
{
    VertexDataType *vertices = (VertexDataType*)memory;
    size_t const EDGES_PER_BLOCK = EDGES_PER_THREAD * THREADS_PER_BLOCK;
    // Seed: classify the start node into the matching queue by its degree
    unsigned int n_edges = vertices[starting_node].neighbours;
    if (n_edges <= EDGES_PER_THREAD)
    {
        newSmallNodesFrontier.Allocate(1);
        newSmallNodesFrontier.nodes[0] = starting_node;
    }
    else if (n_edges <= EDGES_PER_THREAD * 32)
    {
        newMediumNodesFrontier.Allocate(1);
        newMediumNodesFrontier.nodes[0] = starting_node;
    }
    else if (n_edges <= EDGES_PER_BLOCK)
    {
        newLargeNodesFrontier.Allocate(1);
        newLargeNodesFrontier.nodes[0] = starting_node;
    }
    else
    {
        newHugeNodesFrontier.Allocate(1);
        newHugeNodesFrontier.nodes[0] = starting_node;
    }
    unsigned int iteration = 0;
    do
    {
        //printf("iteration %u\n", iteration);
        // Last level's discoveries become this level's input; new queues restart empty
        d_SwapQueues(newSmallNodesFrontier, oldSmallNodesFrontier);
        newSmallNodesFrontier.Reset();
        d_SwapQueues(newMediumNodesFrontier, oldMediumNodesFrontier);
        newMediumNodesFrontier.Reset();
        d_SwapQueues(newLargeNodesFrontier, oldLargeNodesFrontier);
        newLargeNodesFrontier.Reset();
        d_SwapQueues(newHugeNodesFrontier, oldHugeNodesFrontier);
        newHugeNodesFrontier.Reset();
        if (*oldHugeNodesFrontier.size > 0)
        {
            // Pick ITEMS_PER_THREAD so one block covers the whole huge-node queue
            if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 1> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 4)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 4> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 16)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 16> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else if (*oldHugeNodesFrontier.size <= THREADS_PER_BLOCK * 64)
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 64> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            else
                d_GetMaxNodeSize_kernel<VertexDataType, THREADS_PER_BLOCK, 128> << <1, THREADS_PER_BLOCK >> > (oldHugeNodesFrontier, memory, current_max_node_size);
            cudaDeviceSynchronize();
            // Enough blocks for every thread group to cover the largest huge node
            unsigned int n_blocks = (*current_max_node_size + EDGES_PER_BLOCK - 1) / EDGES_PER_BLOCK;
            d_ExploreHugeNodesFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK> << <n_blocks, THREADS_PER_BLOCK >> > (
                oldHugeNodesFrontier, memory_manager, memory, page_size, frontiers,
                newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                n_blocks * THREADS_PER_BLOCK, iteration);
        }
        if (*oldSmallNodesFrontier.size > 0)
        {
            // One thread per small node
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <(*oldSmallNodesFrontier.size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
                (oldSmallNodesFrontier, memory_manager, memory, page_size, frontiers,
                newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                1, iteration);
        }
        if (*oldMediumNodesFrontier.size > 0)
        {
            // One warp (32 threads) per medium node
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <((*oldMediumNodesFrontier.size * 32) + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >
                (oldMediumNodesFrontier, memory_manager, memory, page_size, frontiers,
                newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                32, iteration);
        }
        if (*oldLargeNodesFrontier.size > 0)
        {
            // One block per large node
            d_ExploreFrontierClassification_kernel<VertexDataType, EdgeDataType, EDGES_PER_THREAD, THREADS_PER_BLOCK>
                << <*oldLargeNodesFrontier.size, THREADS_PER_BLOCK >> >
                (oldLargeNodesFrontier, memory_manager, memory, page_size, frontiers,
                newSmallNodesFrontier, newMediumNodesFrontier, newLargeNodesFrontier, newHugeNodesFrontier,
                THREADS_PER_BLOCK, iteration);
        }
        // Wait for all child grids before reading the new queue sizes
        cudaDeviceSynchronize();
        iteration++;
        // Continue while ANY class discovered new nodes. The huge queue was
        // missing from this condition, so a level that discovered only huge
        // nodes would terminate the BFS early and drop them.
    } while (*newSmallNodesFrontier.size > 0
        || *newMediumNodesFrontier.size > 0
        || *newLargeNodesFrontier.size > 0
        || *newHugeNodesFrontier.size > 0);
}
}
// Host wrapper for the basic BFS kernel. Allocates device buffers, runs the
// single-thread driver kernel, copies back the per-vertex depth array, and
// accumulates allocation/kernel/copy timings into 'timing'.
// Returns: vector of length number_vertices with each vertex's BFS depth
// (NOT_VISITIED for unreached vertices).
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    cudaEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    int *dev_found_new_nodes;
    vertex_t *dev_frontier;
    start_clock(start_allocation, end_allocation);
    cudaMalloc((void**)&dev_found_new_nodes, sizeof(int));
    cudaMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    // Byte-wise memset: valid only because NOT_VISITIED has identical bytes
    cudaMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_bfsBasic <VertexDataType, EdgeDataType, 256> <<<1, 1>>>
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_found_new_nodes, dev_frontier, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    // resize, not reserve: reserve() leaves size() == 0, so writing through
    // &result[0] was undefined behaviour and the returned vector was empty.
    result.resize(memory_manager->number_vertices);
    cudaMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, cudaMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    cudaFree(dev_found_new_nodes);
    cudaFree(dev_frontier);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Host wrapper for the dynamic-parallelism BFS. Raises the pending child-launch
// limit (the driver kernel launches one child grid per iteration plus one per
// high-degree node), runs the <<<1,1>>> driver, copies results back, and
// restores the previous limit. Timings are accumulated into 'timing'.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    cudaEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    size_t launch_limit;
    // Save the current limit so it can be restored after the run
    cudaDeviceGetLimit(&launch_limit, cudaLimitDevRuntimePendingLaunchCount);
    cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 32768);
    vertex_t *dev_frontier;
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue newFrontierQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldFrontierQueue(memory_manager->number_vertices);
    cudaMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    cudaMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_bfsDynamicParalellism <VertexDataType, EdgeDataType, 64, 256> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
        newFrontierQueue, oldFrontierQueue, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    // resize, not reserve: reserve() leaves size() == 0, so writing through
    // &result[0] was undefined behaviour and the returned vector was empty.
    result.resize(memory_manager->number_vertices);
    cudaMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, cudaMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    newFrontierQueue.Free();
    oldFrontierQueue.Free();
    cudaFree(dev_frontier);
    cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, launch_limit);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Host wrapper for the preprocessing (classify-then-explore) BFS. Allocates the
// raw + per-class frontier queues, runs the <<<1,1>>> driver kernel, copies the
// per-vertex depth array back, and accumulates timings into 'timing'.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    cudaEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    vertex_t *dev_frontier;
    unsigned int *current_max_node_size; // Used for huge frontier handling
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue rawFrontierQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue smallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue mediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue largeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue hugeNodesQueue(memory_manager->number_vertices);
    cudaMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    // (Restores '&current_max_node_size', which had been corrupted by a
    // character-encoding error.)
    cudaMalloc((void**)&current_max_node_size, sizeof(unsigned int));
    cudaMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_BFSPreprocessing<VertexDataType, EdgeDataType, 4, 128> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
        rawFrontierQueue, smallNodesQueue, mediumNodesQueue, largeNodesQueue, hugeNodesQueue, current_max_node_size, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    // resize, not reserve: reserve() leaves size() == 0, so writing through
    // &result[0] was undefined behaviour and the returned vector was empty.
    result.resize(memory_manager->number_vertices);
    cudaMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, cudaMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    rawFrontierQueue.Free();
    smallNodesQueue.Free();
    mediumNodesQueue.Free();
    largeNodesQueue.Free();
    hugeNodesQueue.Free();
    cudaFree(dev_frontier);
    cudaFree(current_max_node_size);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Host wrapper for the classification BFS. Allocates the eight per-class
// frontier queues (new/old x small/medium/large/huge), runs the <<<1,1>>>
// driver kernel, copies the per-vertex depth array back, and accumulates
// timings into 'timing'.
template <typename VertexDataType, typename EdgeDataType>
std::vector<vertex_t> BFS<VertexDataType, EdgeDataType>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex)
{
    cudaEvent_t start_allocation, end_allocation, start_kernel, end_kernel, start_copy, end_copy;
    vertex_t *dev_frontier;
    unsigned int *current_max_node_size; // Used for huge frontier handling
    start_clock(start_allocation, end_allocation);
    faimGraphBFS::dFrontierQueue newSmallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newMediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newLargeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue newHugeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldSmallNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldMediumNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldLargeNodesQueue(memory_manager->number_vertices);
    faimGraphBFS::dFrontierQueue oldHugeNodesQueue(memory_manager->number_vertices);
    cudaMalloc((void**)&dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices);
    // (Restores '&current_max_node_size', which had been corrupted by a
    // character-encoding error.)
    cudaMalloc((void**)&current_max_node_size, sizeof(unsigned int));
    cudaMemset(dev_frontier, faimGraphBFS::NOT_VISITIED, sizeof(vertex_t) * memory_manager->number_vertices);
    float allocation_time = end_clock(start_allocation, end_allocation);
    start_clock(start_kernel, end_kernel);
    faimGraphBFS::d_bfsClassification<VertexDataType, EdgeDataType, 16, 256> << <1, 1 >> >
        ((MemoryManager*)memory_manager->d_memory, memory_manager->d_data, memory_manager->page_size, dev_frontier,
        newSmallNodesQueue, newMediumNodesQueue, newLargeNodesQueue, newHugeNodesQueue,
        oldSmallNodesQueue, oldMediumNodesQueue, oldLargeNodesQueue, oldHugeNodesQueue,
        current_max_node_size, start_vertex);
    float kernel_time = end_clock(start_kernel, end_kernel);
    start_clock(start_copy, end_copy);
    std::vector<vertex_t> result;
    // resize, not reserve: reserve() leaves size() == 0, so writing through
    // &result[0] was undefined behaviour and the returned vector was empty.
    result.resize(memory_manager->number_vertices);
    cudaMemcpy(result.data(), dev_frontier, sizeof(vertex_t) * memory_manager->number_vertices, cudaMemcpyDeviceToHost);
    float copy_time = end_clock(start_copy, end_copy);
    oldSmallNodesQueue.Free();
    oldMediumNodesQueue.Free();
    oldLargeNodesQueue.Free();
    oldHugeNodesQueue.Free();
    newSmallNodesQueue.Free();
    newMediumNodesQueue.Free();
    newLargeNodesQueue.Free();
    newHugeNodesQueue.Free();
    cudaFree(dev_frontier);
    cudaFree(current_max_node_size);
    timing.overall_alloc += allocation_time;
    timing.overall_kernel += kernel_time;
    timing.overall_cpy += copy_time;
    return result;
}
// Explicit template instantiations: emit algBFSBasic, algBFSDynamicParalellism,
// algBFSPreprocessing and algBFSClassification for every supported
// (VertexData, EdgeData) combination (plain / weighted / semantic, AoS and SOA)
// so other translation units can link against the definitions in this file.
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing, vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSBasic(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSDynamicParalellism(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSPreprocessing(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeData>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeight>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemantic>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexData, EdgeDataSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataWeight, EdgeDataWeightSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
template std::vector<vertex_t> BFS<VertexDataSemantic, EdgeDataSemanticSOA>::algBFSClassification(const std::unique_ptr<MemoryManager>& memory_manager, IndividualTimings& timing,vertex_t start_vertex);
|
1ec1444259f10ef833f023a934baa024de2af16d.hip | // !!! This is a file automatically generated by hipify!!!
# include <stdio.h>
# include <math.h>
# include <stdlib.h>
# include <hip/hip_runtime.h>
# include <time.h>
# include "cuPrintf.cu"
# include "settings.h"
# include "error.cu"
# include "structs.h"
# include "globals.cu"
# include "allocate.cu"
# include "fileoutput.cu"
# include "parameters.cu"
# include "memcopy.cu"
# include "mesh.cu"
# include "mymath.cu"
# include "GlobalSearch.cu"
# include "tracers.cu"
# include "velocity.cu"
# include "unstructured.cu"
# include "cartesian.cu"
# include "integration.cu"
# include "integration3D.cu"
# include "integration2D.cu"
# include "multistep.cu"
# include "clean.cu"
// Main function: driver for the GPU tracer-integration program (HIP build).
// Reads simulation parameters, loads the mesh and tracer data, allocates
// host and device storage, then for every output frame integrates the
// launched tracers over each velocity-data interval overlapping that frame
// and writes VTK/binary output. Overall runtime is measured with GPU events.
int main(int argc, const char *argv[]) {
float elapsedTime;
// Read in the data file, check the settings and calculated some of the derived parameters....
ReadInParameters(argc,argv);
CheckParameters();
SetDerivedParameters();
// Time counters which use GPU to time our entire program....
hipEvent_t start, stop, stop_initial;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventCreate( &stop_initial );
hipEventRecord( start, 0 );
// Load Mesh data....
LoadMeshData();
// Allocate Host variables (Variables on CPU)
allocate_host();
// Generate Staggered release
if (Trace_ReleaseStrategy == 1) {
GenerateStaggeredRelease();
// Tracers whose start time coincides with the first output time are
// marked as launched immediately.
for (int kk = 0; kk < N_Total; kk++) {
if (Tracer.Start_time[kk] == Output_TStart)
Tracer.Status[kk] = LAUNCHED;
}
copyresult2vtk_staggered(0, N_Total, Output_TStart, 0.00, 0.00);
}
// Allocating Device variables (These variables are allocated on GPU)
allocate_device();
// Allocate MISC variables
alloc_misc();
// Memcpy some constant parameters . These are mesh parameters for unstructured mesh.
host2device_const();
// Initializing our Tracer
initialize_new();
// Calculate Performance
hipEventRecord( stop_initial, 0 );
hipEventSynchronize( stop_initial );
hipEventElapsedTime( &elapsedTime, start, stop_initial );
printf( "Time for initialization: %3.2f ms\n", elapsedTime );
hipEventDestroy( stop_initial ) ;
// Start Integrating
printf("Integrating ..... \n\n");
int i,j;
int num_integrated = 0;
if (Trace_Compute) {
// Main loop for unstructured mesh
for(i = 0; i < frames; i++) {
MAIN_LOOP = 1; //This flag will memcopy some parameters needed during integrating function .
int N1;
// Time interval for Output array
Data_Loaded_TMin = Output_TStart + i*Output_TDelta;
Data_Loaded_TMax = Data_Loaded_TMin + Output_TDelta;
// Flags for counting intervals into which data will be divided
int sum = 0;
// NOTE(review): counter is assigned only once sum becomes non-zero; the
// loop below that reads it iterates sum times, so it is never read
// unassigned.
int counter;
double tmin, tmax;
// Check how many velocity data file lies between each output
// Check for all Data times ....
for(j = 0; j < frames_data; j++) {
if ( ( (Data_Loaded_TMin >= DataTime1[j].Start_time) && (Data_Loaded_TMin < (DataTime1[j].Stop_time - TINY)) ) || ( (Data_Loaded_TMax > (DataTime1[j].Start_time + TINY)) && (Data_Loaded_TMax <= (DataTime1[j].Stop_time + TINY)) || ( (Data_Loaded_TMin < (DataTime1[j].Start_time)) && (Data_Loaded_TMax > (DataTime1[j].Stop_time + TINY ) ) ) ) ) {
if (sum == 0) // first data time range .....
counter = j; // this will be first data file set which will be loaded for integration
sum++;
}
}
for (j = 0; j < sum; j++) { // Intermediate Main Loop ....
// assigning tmin first ... tmin represents the starting time of integration for current loaded data set.
if (Data_Loaded_TMin > DataTime1[counter + j].Start_time) {
tmin = Data_Loaded_TMin;
} else {
tmin = DataTime1[counter + j].Start_time;
}
//Assigning value of tmax .... tmax represents the ending time of integration for current loaded data set.
if (Data_Loaded_TMax > (DataTime1[counter + j].Stop_time + TINY)) {
tmax = DataTime1[counter + j].Stop_time;
} else {
tmax = Data_Loaded_TMax;
}
if (Trace_ReleaseStrategy == 0) // Normal release
// Launch Tracers if they were to be launched between time interval spanned by tmin and tmax ....
N_Launches = Launch_Tracer(tmin,tmax, N_Launches );
// Read Velocity data from velocity data files....
readveldata(counter + j);
printf("Data loaded in memory spans from t = %f to %f \n", tmin, tmax );
if (Trace_ReleaseStrategy == 0) {// Normal release
// Number of points to be integrated
N1 = (N_Launches ) * N_Frame; // N1 represents the number of points to be integrated .........
} else
N1 = N_Total;
printf("N = %d points loaded ....\n", N1);
// Number of Threads per block.
nthreads = POINTS_BLOCKSIZE_MAIN;
host2device(N1);
// Calculation of Block size for single chunk of integration kernel ...
if ( N1 % nthreads == 0 ) {
nblocks = (int)(N1/nthreads);
}
else {
nblocks = (int)((N1/nthreads) + 1);
}
if (Int_Type == 1) { // RK4 Routine
// Integrate all points from tmin to tmax on GPU using RK4 routine.
if (Trace_ReleaseStrategy == 1) { // Staggered release where tloc is different for particles.
ComputePoints(tmin, tmax, N1, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time);
} else if (Trace_ReleaseStrategy == 0) { // For Normal Releases, kernels in which value of tloc remains the same.
if (Data_MeshType == UNSTRUCTURED) {
if (Dimensions == 2) {
ComputePoints_Unstructured2D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
} else { // 3D integration
ComputePoints_Unstructured3D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
} else { // Cartesian mesh
if (Dimensions == 2) { // 2D cartesian
ComputePoints_Cartesian2D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
} else { // 3D cartesian integration
ComputePoints_Cartesian3D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
}
}
} else if (Int_Type == 3) { // Second order Adams-Bashford method.
if (Trace_ReleaseStrategy == 1) { // Staggered release where tloc is different for particles.
FatalError("Multistep Methods are currently not supported for staggered release. \n\n");
} else if (Trace_ReleaseStrategy == 0) { // Normal release
// Compute point function for normal release in multistep function
computePoints_AdamsBashford2(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
}
// copy results back into host arrays ......
device2host(N1);
int ii;
num_integrated = 0;
if (Trace_ReleaseStrategy == 0) {
// Crude manner to check for number of points integrated. (This Logic does not give the exact number of points integrated. Also this number will deviate from the number from flowVC file.)
for(ii = 0; ii < N1; ii++) {
if (Data_MeshType == UNSTRUCTURED) {
// Check element index for unstructured
if (Tracer.ElementIndex[ii] != -1) {
num_integrated++;
//printf("x = %f , y = %f and z = %f \n", Tracer.x[ii], Tracer.y[ii], Tracer.z[ii]);
}
} else {
// For catresian check if a point is in the domain.
if (Tracer.LeftDomain[ii] != 1)
num_integrated++;
}
}
printf("N = %d points integrated ....\n\n", num_integrated);
} else {
for(ii = 0; ii < N1; ii++) {
if (Data_MeshType == UNSTRUCTURED) {
// Check element index for unstructured
if (Tracer.ElementIndex[ii] != -1) {
// Skip tracers whose active window does not overlap the
// currently loaded data interval [tmin, tmax].
if (Tracer.Stop_time[ii] < tmin || Tracer.Start_time[ii] > tmax) {
continue;
} else {
num_integrated++;
}
}
} else {
// For catresian check if a point is in the domain.
if (Tracer.LeftDomain[ii] != 1)
num_integrated++;
}
}
printf("N = %d points integrated ....\n\n", num_integrated);
}
}
if (Trace_ReleaseStrategy == 0) {
// Copy results to Output file.
copyresult2vtk(i + 1, (N_Launches ), Output_time[i + 1]);
copyresult2bin(i + 1, (N_Launches ), Output_time[i + 1]);
printf("Output %d released ....\n\n\n", i+1);
} else {
// Copy results to Output file.
copyresult2vtk_staggered(i + 1, N_Total, Output_time[i + 1], Data_Loaded_TMin, Data_Loaded_TMax);
copyresult2bin_staggered(i + 1, N_Total, Output_time[i + 1], Data_Loaded_TMin, Data_Loaded_TMax);
printf("Output %d released ....\n\n\n", i+1);
}
} // End of main loop for unstructured mesh
}
// Calculate Performance
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
printf( "Time to complete entire program: %3.2f ms\n", elapsedTime );
hipEventDestroy( start ) ;
hipEventDestroy( stop ) ;
// Cleaning memory space.
clean_all();
return 0;
}
| 1ec1444259f10ef833f023a934baa024de2af16d.cu | # include <stdio.h>
# include <math.h>
# include <stdlib.h>
# include <cuda.h>
# include <time.h>
# include "cuPrintf.cu"
# include "settings.h"
# include "error.cu"
# include "structs.h"
# include "globals.cu"
# include "allocate.cu"
# include "fileoutput.cu"
# include "parameters.cu"
# include "memcopy.cu"
# include "mesh.cu"
# include "mymath.cu"
# include "GlobalSearch.cu"
# include "tracers.cu"
# include "velocity.cu"
# include "unstructured.cu"
# include "cartesian.cu"
# include "integration.cu"
# include "integration3D.cu"
# include "integration2D.cu"
# include "multistep.cu"
# include "clean.cu"
// Main function: driver for the GPU tracer-integration program (CUDA build).
// Reads simulation parameters, loads the mesh and tracer data, allocates
// host and device storage, then for every output frame integrates the
// launched tracers over each velocity-data interval overlapping that frame
// and writes VTK/binary output. Overall runtime is measured with GPU events.
int main(int argc, const char *argv[]) {
float elapsedTime;
// Read in the data file, check the settings and calculated some of the derived parameters....
ReadInParameters(argc,argv);
CheckParameters();
SetDerivedParameters();
// Time counters which use GPU to time our entire program....
cudaEvent_t start, stop, stop_initial;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventCreate( &stop_initial );
cudaEventRecord( start, 0 );
// Load Mesh data....
LoadMeshData();
// Allocate Host variables (Variables on CPU)
allocate_host();
// Generate Staggered release
if (Trace_ReleaseStrategy == 1) {
GenerateStaggeredRelease();
// Tracers whose start time coincides with the first output time are
// marked as launched immediately.
for (int kk = 0; kk < N_Total; kk++) {
if (Tracer.Start_time[kk] == Output_TStart)
Tracer.Status[kk] = LAUNCHED;
}
copyresult2vtk_staggered(0, N_Total, Output_TStart, 0.00, 0.00);
}
// Allocating Device variables (These variables are allocated on GPU)
allocate_device();
// Allocate MISC variables
alloc_misc();
// Memcpy some constant parameters . These are mesh parameters for unstructured mesh.
host2device_const();
// Initializing our Tracer
initialize_new();
// Calculate Performance
cudaEventRecord( stop_initial, 0 );
cudaEventSynchronize( stop_initial );
cudaEventElapsedTime( &elapsedTime, start, stop_initial );
printf( "Time for initialization: %3.2f ms\n", elapsedTime );
cudaEventDestroy( stop_initial ) ;
// Start Integrating
printf("Integrating ..... \n\n");
int i,j;
int num_integrated = 0;
if (Trace_Compute) {
// Main loop for unstructured mesh
for(i = 0; i < frames; i++) {
MAIN_LOOP = 1; //This flag will memcopy some parameters needed during integrating function .
int N1;
// Time interval for Output array
Data_Loaded_TMin = Output_TStart + i*Output_TDelta;
Data_Loaded_TMax = Data_Loaded_TMin + Output_TDelta;
// Flags for counting intervals into which data will be divided
int sum = 0;
// NOTE(review): counter is assigned only once sum becomes non-zero; the
// loop below that reads it iterates sum times, so it is never read
// unassigned.
int counter;
double tmin, tmax;
// Check how many velocity data file lies between each output
// Check for all Data times ....
for(j = 0; j < frames_data; j++) {
if ( ( (Data_Loaded_TMin >= DataTime1[j].Start_time) && (Data_Loaded_TMin < (DataTime1[j].Stop_time - TINY)) ) || ( (Data_Loaded_TMax > (DataTime1[j].Start_time + TINY)) && (Data_Loaded_TMax <= (DataTime1[j].Stop_time + TINY)) || ( (Data_Loaded_TMin < (DataTime1[j].Start_time)) && (Data_Loaded_TMax > (DataTime1[j].Stop_time + TINY ) ) ) ) ) {
if (sum == 0) // first data time range .....
counter = j; // this will be first data file set which will be loaded for integration
sum++;
}
}
for (j = 0; j < sum; j++) { // Intermediate Main Loop ....
// assigning tmin first ... tmin represents the starting time of integration for current loaded data set.
if (Data_Loaded_TMin > DataTime1[counter + j].Start_time) {
tmin = Data_Loaded_TMin;
} else {
tmin = DataTime1[counter + j].Start_time;
}
//Assigning value of tmax .... tmax represents the ending time of integration for current loaded data set.
if (Data_Loaded_TMax > (DataTime1[counter + j].Stop_time + TINY)) {
tmax = DataTime1[counter + j].Stop_time;
} else {
tmax = Data_Loaded_TMax;
}
if (Trace_ReleaseStrategy == 0) // Normal release
// Launch Tracers if they were to be launched between time interval spanned by tmin and tmax ....
N_Launches = Launch_Tracer(tmin,tmax, N_Launches );
// Read Velocity data from velocity data files....
readveldata(counter + j);
printf("Data loaded in memory spans from t = %f to %f \n", tmin, tmax );
if (Trace_ReleaseStrategy == 0) {// Normal release
// Number of points to be integrated
N1 = (N_Launches ) * N_Frame; // N1 represents the number of points to be integrated .........
} else
N1 = N_Total;
printf("N = %d points loaded ....\n", N1);
// Number of Threads per block.
nthreads = POINTS_BLOCKSIZE_MAIN;
host2device(N1);
// Calculation of Block size for single chunk of integration kernel ...
if ( N1 % nthreads == 0 ) {
nblocks = (int)(N1/nthreads);
}
else {
nblocks = (int)((N1/nthreads) + 1);
}
if (Int_Type == 1) { // RK4 Routine
// Integrate all points from tmin to tmax on GPU using RK4 routine.
if (Trace_ReleaseStrategy == 1) { // Staggered release where tloc is different for particles.
ComputePoints(tmin, tmax, N1, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time);
} else if (Trace_ReleaseStrategy == 0) { // For Normal Releases, kernels in which value of tloc remains the same.
if (Data_MeshType == UNSTRUCTURED) {
if (Dimensions == 2) {
ComputePoints_Unstructured2D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
} else { // 3D integration
ComputePoints_Unstructured3D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
} else { // Cartesian mesh
if (Dimensions == 2) { // 2D cartesian
ComputePoints_Cartesian2D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
} else { // 3D cartesian integration
ComputePoints_Cartesian3D(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
}
}
} else if (Int_Type == 3) { // Second order Adams-Bashford method.
if (Trace_ReleaseStrategy == 1) { // Staggered release where tloc is different for particles.
FatalError("Multistep Methods are currently not supported for staggered release. \n\n");
} else if (Trace_ReleaseStrategy == 0) { // Normal release
// Compute point function for normal release in multistep function
computePoints_AdamsBashford2(tmin, tmax, N_Frame, DataTime1[counter + j].Start_time, DataTime1[counter + j].Stop_time, N_Launches);
}
}
// copy results back into host arrays ......
device2host(N1);
int ii;
num_integrated = 0;
if (Trace_ReleaseStrategy == 0) {
// Crude manner to check for number of points integrated. (This Logic does not give the exact number of points integrated. Also this number will deviate from the number from flowVC file.)
for(ii = 0; ii < N1; ii++) {
if (Data_MeshType == UNSTRUCTURED) {
// Check element index for unstructured
if (Tracer.ElementIndex[ii] != -1) {
num_integrated++;
//printf("x = %f , y = %f and z = %f \n", Tracer.x[ii], Tracer.y[ii], Tracer.z[ii]);
}
} else {
// For catresian check if a point is in the domain.
if (Tracer.LeftDomain[ii] != 1)
num_integrated++;
}
}
printf("N = %d points integrated ....\n\n", num_integrated);
} else {
for(ii = 0; ii < N1; ii++) {
if (Data_MeshType == UNSTRUCTURED) {
// Check element index for unstructured
if (Tracer.ElementIndex[ii] != -1) {
// Skip tracers whose active window does not overlap the
// currently loaded data interval [tmin, tmax].
if (Tracer.Stop_time[ii] < tmin || Tracer.Start_time[ii] > tmax) {
continue;
} else {
num_integrated++;
}
}
} else {
// For catresian check if a point is in the domain.
if (Tracer.LeftDomain[ii] != 1)
num_integrated++;
}
}
printf("N = %d points integrated ....\n\n", num_integrated);
}
}
if (Trace_ReleaseStrategy == 0) {
// Copy results to Output file.
copyresult2vtk(i + 1, (N_Launches ), Output_time[i + 1]);
copyresult2bin(i + 1, (N_Launches ), Output_time[i + 1]);
printf("Output %d released ....\n\n\n", i+1);
} else {
// Copy results to Output file.
copyresult2vtk_staggered(i + 1, N_Total, Output_time[i + 1], Data_Loaded_TMin, Data_Loaded_TMax);
copyresult2bin_staggered(i + 1, N_Total, Output_time[i + 1], Data_Loaded_TMin, Data_Loaded_TMax);
printf("Output %d released ....\n\n\n", i+1);
}
} // End of main loop for unstructured mesh
}
// Calculate Performance
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
printf( "Time to complete entire program: %3.2f ms\n", elapsedTime );
cudaEventDestroy( start ) ;
cudaEventDestroy( stop ) ;
// Cleaning memory space.
clean_all();
return 0;
}
|
123e609bae9584b12d7b6d89bf9f14b0a61d0c94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************
* NOTE: This code respects the initial interface, so that it
* succesfully passes the online grader. The rest versions
* optimize for speed and space by using floats and 8-bit ints
*
**************************************************************/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "../inc/ising.h"
#define diff 1e-6f
// Debugging function that prints an n x n window of a size x size array.
// Note: despite the old description, it does NOT print the first n x n
// elements -- it starts at element (20, 20), one row per output line.
void print_nn_array(int *x, int n, int size){
for(int i=0; i<n; ++i){
for(int j=0; j<n; ++j){
printf("%d ", x[(i + 20)*size + 20 + j]);
}
printf("\n");
}
printf("\n");
}
// CUDA Kernel: one Ising update step. Each thread owns lattice site
// (row, col) of the n x n grid, accumulates the weighted 5x5 neighbourhood
// influence with periodic boundaries, and writes the new spin to writeArr.
// Preconditions: weightArr points at the CENTRE of the 5x5 stencil (so
// indices [-2..2][-2..2] are valid) and its centre weight is 0 so a site
// does not influence itself.
__global__ void computeMoment(int *readArr, int *writeArr, double *weightArr, int n){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
// Guard the partial blocks at the grid edge. BUGFIX: the bound was
// hard-coded to 517; using the lattice size n keeps the kernel correct
// for any input size.
if(row < n && col < n){
float influence = 0.0f;
for (int i=-2; i<3; i++)
{
for (int j=-2; j<3; j++)
{
//add extra n so that modulo behaves like mathematics modulo
//that is return only positive values
int y = (row+i+n)%n;
int x = (col+j+n)%n;
influence += weightArr[i*5 + j]*readArr[y*n + x];
}
}
// Keep the old spin when the influence is numerically zero (|.| <= diff),
// otherwise take the sign of the influence.
writeArr[row*n + col] = readArr[row*n + col];
if (influence<-diff) writeArr[row*n + col] = -1;
else if (influence>diff) writeArr[row*n + col] = 1;
}
// The trailing __syncthreads() was removed: a barrier at kernel end is
// unnecessary, and placing one after divergent control flow is unsafe.
}
// Runs k Ising evolution steps on the n x n spin lattice G (values +-1)
// using the 5x5 neighbourhood weights w (row-major 5x5). G is updated in
// place. Working buffers live in managed memory because they are accessed
// both by the host and inside the kernel.
void ising(int *G, double *w, int k, int n)
{
int *readArr, *writeArr;
hipError_t err1 = hipMallocManaged(&readArr, n*n*sizeof(int));
hipError_t err2 = hipMallocManaged(&writeArr,n*n*sizeof(int));
double *weightArr_d;
hipError_t err3 = hipMallocManaged(&weightArr_d, 5*5*sizeof(double));
(void)err1; (void)err2; (void)err3; // return codes kept for debugging only
// Copy the input lattice and the weights into the managed buffers.
memcpy(readArr, G, n*n*sizeof(int));
memcpy(weightArr_d, w, 5*5*sizeof(double));
// Pointer to the stencil centre so kernel indexes [-2..2][-2..2] are valid.
// BUGFIX: the original overwrote weightArr_d itself with this offset and
// later passed the offset pointer to hipFree(); free must receive the base
// pointer returned by the allocator.
double *weightCenter = &weightArr_d[2*5 + 2];
weightCenter[0] = 0.0; // a spin does not influence itself
// Launch configuration is loop-invariant: 32x32 threads per block and
// ceil(n/32) blocks per dimension (proper ceiling division).
dim3 dimBlock(32, 32);
int gridSz = (n + 31) / 32;
dim3 dimGrid(gridSz, gridSz);
for (int i=1; i<=k; i++)
{
// Run the kernel in GPU
hipLaunchKernelGGL(( computeMoment), dim3(dimGrid), dim3(dimBlock), 0, 0, readArr, writeArr, weightCenter, n);
// Uncomment below to check for launch errors
//printf("%s\n", hipGetErrorString(hipGetLastError()));
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Swap read and write arrays so the new state feeds the next step.
int *temp = readArr;
readArr = writeArr;
writeArr = temp;
}
// The final result now is in readArr. Copy the contents into array G.
memcpy(G, readArr, n*n*sizeof(int));
hipFree( readArr );
hipFree( writeArr );
hipFree( weightArr_d );
}
// Driver: evolve the 517x517 lattice read from ../test/conf-init.bin for
// k = 11 steps and write the resulting spins to ../test/conf-11.ans.
// Returns 0 on success, 1 on I/O or allocation failure.
int main()
{
FILE* fin = fopen("../test/conf-init.bin","rb");
FILE* fout = fopen("../test/conf-11.ans","wb");
// BUGFIX: the original never checked fopen() and leaked both handles.
if (fin == NULL || fout == NULL) {
fprintf(stderr, "error: cannot open input/output files\n");
if (fin != NULL) fclose(fin);
if (fout != NULL) fclose(fout);
return 1;
}
int n=517, k=11;
double weights[5][5] = { {0.004f,0.016f,0.026f,0.016f,0.004f},
{0.016f,0.071f,0.117f,0.071f,0.016f},
{0.026f,0.117f,0.000f,0.117f,0.026f},
{0.016f,0.071f,0.117f,0.071f,0.016f},
{0.004f,0.016f,0.026f,0.016f,0.004f}};
int *latticeArr = (int *) malloc(n*n*sizeof(int));
if (latticeArr == NULL) {
fprintf(stderr, "error: out of memory\n");
fclose(fin);
fclose(fout);
return 1;
}
// Read the whole row-major lattice in one call (equivalent to the old
// element-by-element loop) and verify the read succeeded.
if (fread(latticeArr, sizeof(int), (size_t)n*n, fin) != (size_t)n*n) {
fprintf(stderr, "error: short read on input lattice\n");
free(latticeArr);
fclose(fin);
fclose(fout);
return 1;
}
ising(latticeArr, (double*)weights, k, n);
// Write the evolved lattice back out in one call.
fwrite(latticeArr, sizeof(int), (size_t)n*n, fout);
fclose(fin);
fclose(fout);
free( latticeArr );
return 0;
} | 123e609bae9584b12d7b6d89bf9f14b0a61d0c94.cu | /**************************************************************
* NOTE: This code respects the initial interface, so that it
* succesfully passes the online grader. The rest versions
* optimize for speed and space by using floats and 8-bit ints
*
**************************************************************/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "../inc/ising.h"
#define diff 1e-6f
// Debugging function that prints an n x n window of a size x size array.
// Note: despite the old description, it does NOT print the first n x n
// elements -- it starts at element (20, 20), one row per output line.
void print_nn_array(int *x, int n, int size){
for(int i=0; i<n; ++i){
for(int j=0; j<n; ++j){
printf("%d ", x[(i + 20)*size + 20 + j]);
}
printf("\n");
}
printf("\n");
}
// CUDA Kernel: one Ising update step. Each thread owns lattice site
// (row, col) of the n x n grid, accumulates the weighted 5x5 neighbourhood
// influence with periodic boundaries, and writes the new spin to writeArr.
// Preconditions: weightArr points at the CENTRE of the 5x5 stencil (so
// indices [-2..2][-2..2] are valid) and its centre weight is 0 so a site
// does not influence itself.
__global__ void computeMoment(int *readArr, int *writeArr, double *weightArr, int n){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
// Guard the partial blocks at the grid edge. BUGFIX: the bound was
// hard-coded to 517; using the lattice size n keeps the kernel correct
// for any input size.
if(row < n && col < n){
float influence = 0.0f;
for (int i=-2; i<3; i++)
{
for (int j=-2; j<3; j++)
{
//add extra n so that modulo behaves like mathematics modulo
//that is return only positive values
int y = (row+i+n)%n;
int x = (col+j+n)%n;
influence += weightArr[i*5 + j]*readArr[y*n + x];
}
}
// Keep the old spin when the influence is numerically zero (|.| <= diff),
// otherwise take the sign of the influence.
writeArr[row*n + col] = readArr[row*n + col];
if (influence<-diff) writeArr[row*n + col] = -1;
else if (influence>diff) writeArr[row*n + col] = 1;
}
// The trailing __syncthreads() was removed: a barrier at kernel end is
// unnecessary, and placing one after divergent control flow is unsafe.
}
// Runs k Ising evolution steps on the n x n spin lattice G (values +-1)
// using the 5x5 neighbourhood weights w (row-major 5x5). G is updated in
// place. Working buffers live in managed memory because they are accessed
// both by the host and inside the kernel.
void ising(int *G, double *w, int k, int n)
{
int *readArr, *writeArr;
cudaError_t err1 = cudaMallocManaged(&readArr, n*n*sizeof(int));
cudaError_t err2 = cudaMallocManaged(&writeArr,n*n*sizeof(int));
double *weightArr_d;
cudaError_t err3 = cudaMallocManaged(&weightArr_d, 5*5*sizeof(double));
(void)err1; (void)err2; (void)err3; // return codes kept for debugging only
// Copy the input lattice and the weights into the managed buffers.
memcpy(readArr, G, n*n*sizeof(int));
memcpy(weightArr_d, w, 5*5*sizeof(double));
// Pointer to the stencil centre so kernel indexes [-2..2][-2..2] are valid.
// BUGFIX: the original overwrote weightArr_d itself with this offset and
// later passed the offset pointer to cudaFree(); free must receive the base
// pointer returned by the allocator.
double *weightCenter = &weightArr_d[2*5 + 2];
weightCenter[0] = 0.0; // a spin does not influence itself
// Launch configuration is loop-invariant: 32x32 threads per block and
// ceil(n/32) blocks per dimension (proper ceiling division).
dim3 dimBlock(32, 32);
int gridSz = (n + 31) / 32;
dim3 dimGrid(gridSz, gridSz);
for (int i=1; i<=k; i++)
{
// Run the kernel in GPU
computeMoment<<<dimGrid, dimBlock>>> (readArr, writeArr, weightCenter, n);
// Uncomment below to check for launch errors
//printf("%s\n", cudaGetErrorString(cudaGetLastError()));
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Swap read and write arrays so the new state feeds the next step.
int *temp = readArr;
readArr = writeArr;
writeArr = temp;
}
// The final result now is in readArr. Copy the contents into array G.
memcpy(G, readArr, n*n*sizeof(int));
cudaFree( readArr );
cudaFree( writeArr );
cudaFree( weightArr_d );
}
// Driver: evolve the 517x517 lattice read from ../test/conf-init.bin for
// k = 11 steps and write the resulting spins to ../test/conf-11.ans.
// Returns 0 on success, 1 on I/O or allocation failure.
int main()
{
FILE* fin = fopen("../test/conf-init.bin","rb");
FILE* fout = fopen("../test/conf-11.ans","wb");
// BUGFIX: the original never checked fopen() and leaked both handles.
if (fin == NULL || fout == NULL) {
fprintf(stderr, "error: cannot open input/output files\n");
if (fin != NULL) fclose(fin);
if (fout != NULL) fclose(fout);
return 1;
}
int n=517, k=11;
double weights[5][5] = { {0.004f,0.016f,0.026f,0.016f,0.004f},
{0.016f,0.071f,0.117f,0.071f,0.016f},
{0.026f,0.117f,0.000f,0.117f,0.026f},
{0.016f,0.071f,0.117f,0.071f,0.016f},
{0.004f,0.016f,0.026f,0.016f,0.004f}};
int *latticeArr = (int *) malloc(n*n*sizeof(int));
if (latticeArr == NULL) {
fprintf(stderr, "error: out of memory\n");
fclose(fin);
fclose(fout);
return 1;
}
// Read the whole row-major lattice in one call (equivalent to the old
// element-by-element loop) and verify the read succeeded.
if (fread(latticeArr, sizeof(int), (size_t)n*n, fin) != (size_t)n*n) {
fprintf(stderr, "error: short read on input lattice\n");
free(latticeArr);
fclose(fin);
fclose(fout);
return 1;
}
ising(latticeArr, (double*)weights, k, n);
// Write the evolved lattice back out in one call.
fwrite(latticeArr, sizeof(int), (size_t)n*n, fout);
fclose(fin);
fclose(fout);
free( latticeArr );
return 0;
} |
33710460db38d4d33ac7329d203d7168fb080932.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DEVICE_CODE
#include "Parallel_CellList.h"
#include "Parallel_Interface.h"
#include "Parallel_CellList_device.h"
#include "Parallel_Algorithm.h"
#include "Parallel_BondList.h"
#include "Auxiliary.h"
#include "Parallel_Timer.h"
#include "Parallel_Interface.h"
#include "Parallel_Auxiliary.h"
#include "compile_error_mixcode.h"
// Reset the cell list's per-cell bookkeeping on the device by launching
// CudaGlobal::initZeroCell with one thread block per cell over
// numAtomInCell. NOTE(review): the kernel body lives elsewhere; presumably
// it zeroes the per-cell atom counters -- confirm against the kernel.
void Parallel::DeviceCellListedMDData::
initZeroCell ()
{
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell();
// One block per cell of the numCell.x * numCell.y * numCell.z grid.
dim3 gridDim = toGridDim(numCell.x*numCell.y*numCell.z);
hipLaunchKernelGGL(( Parallel::CudaGlobal::initZeroCell)
, dim3(gridDim), dim3(numThreadsInCell), 0, 0,
numCell,
numAtomInCell);
checkCUDAError ("DeviceCellListedMDData::initZeroCell");
}
// Compute this rank's sub-box frame and cell-grid dimensions.
// Given the cut-off input_rlist, the cell refinement input_dividelevel and
// the directions bdir along which cells are built, this fills:
//   result_frameLow/result_frameUp -- the rank's spatial sub-box (from the
//     Cartesian process decomposition of the global box), extended by one
//     cell layer of ghost cells on every side;
//   result_numCell -- cells per direction, chosen so each cell edge is at
//     least input_rlist, plus 2 ghost cells, times input_dividelevel.
// Throws MDExcptCellList if any active direction would have fewer than 3 cells.
void Parallel::DeviceCellListedMDData::
calNumCell (const ScalorType & input_rlist,
const IndexType & input_dividelevel,
const BoxDirection_t & bdir,
IntVectorType & result_numCell,
VectorType & result_frameLow,
VectorType & result_frameUp)
{
// Cartesian process grid and this rank's coordinate in it.
int Nx, Ny, Nz;
Parallel::Interface::numProcDim (Nx, Ny, Nz);
int ix, iy, iz;
Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(), ix, iy, iz);
// Per-rank sub-box edge lengths.
double dx, dy, dz;
dx = getGlobalBoxSize().x / double(Nx);
dy = getGlobalBoxSize().y / double(Ny);
dz = getGlobalBoxSize().z / double(Nz);
result_frameLow.x = dx * ix;
result_frameLow.y = dy * iy;
result_frameLow.z = dz * iz;
result_frameUp.x = result_frameLow.x + dx;
result_frameUp.y = result_frameLow.y + dy;
result_frameUp.z = result_frameLow.z + dz;
// Which directions actually get a cell decomposition.
bool CellOnX, CellOnY, CellOnZ;
CellOnX = bdir & RectangularBoxGeometry::mdRectBoxDirectionX;
CellOnY = bdir & RectangularBoxGeometry::mdRectBoxDirectionY;
CellOnZ = bdir & RectangularBoxGeometry::mdRectBoxDirectionZ;
// floor(edge / rlist) cells per direction => each cell edge >= input_rlist.
double input_rlisti = 1./input_rlist;
if (CellOnX ) result_numCell.x = int ( floor(dx * input_rlisti) );
else result_numCell.x = 1;
if (CellOnY ) result_numCell.y = int ( floor(dy * input_rlisti) );
else result_numCell.y = 1;
if (CellOnZ ) result_numCell.z = int ( floor(dz * input_rlisti) );
else result_numCell.z = 1;
if ((CellOnX && result_numCell.x < 3) ||
(CellOnY && result_numCell.y < 3) ||
(CellOnZ && result_numCell.z < 3) ){
printf("input_rlist is %f, nx %d ny %d nz %d\n", input_rlist, result_numCell.x, result_numCell.y, result_numCell.z);
throw MDExcptCellList ("Number of cell on one direction is less than 3");
}
// add ghost cell: extend the frame by one cell width on every side and
// account for the two extra cell layers per direction.
VectorType dcell;
dcell.x = (result_frameUp.x - result_frameLow.x) / result_numCell.x;
dcell.y = (result_frameUp.y - result_frameLow.y) / result_numCell.y;
dcell.z = (result_frameUp.z - result_frameLow.z) / result_numCell.z;
result_frameUp.x += dcell.x;
result_frameUp.y += dcell.y;
result_frameUp.z += dcell.z;
result_frameLow.x -= dcell.x;
result_frameLow.y -= dcell.y;
result_frameLow.z -= dcell.z;
result_numCell.x += 2;
result_numCell.y += 2;
result_numCell.z += 2;
// Refine each active direction by the divide level.
if (CellOnX) result_numCell.x *= input_dividelevel;
if (CellOnY) result_numCell.y *= input_dividelevel;
if (CellOnZ) result_numCell.z *= input_dividelevel;
}
// Build the device cell structure for cut-off rlist_ and refinement
// devideLevel_ along the directions in bdir. Steps: compute cell counts and
// the ghosted frame via calNumCell, back up the current atom data, grow the
// per-atom storage to numThreadsInCell slots per cell, zero the per-cell
// counters, then scatter all atoms into their cells with the
// formCellStructure kernel. Device-side errors are checked afterwards.
void Parallel::DeviceCellListedMDData::
initCellStructure (const ScalorType & rlist_,
const IndexType & devideLevel_,
const BoxDirection_t & bdir)
{
rlist = rlist_;
devideLevel = devideLevel_;
calNumCell (rlist, devideLevel, bdir, numCell, frameLow, frameUp);
// Snapshot of the current (un-celled) atom data to scatter from.
DeviceMDData bkData (*this);
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
// maxNumNeighborCell = 1;
// if (CellOnX) maxNumNeighborCell *= devideLevel * 2 + 1;
// if (CellOnY) maxNumNeighborCell *= devideLevel * 2 + 1;
// if (CellOnZ) maxNumNeighborCell *= devideLevel * 2 + 1;
// Each cell reserves numThreadsInCell atom slots; grow storage if needed.
if (numThreadsInCell * totalNumCell > DeviceMDData::memSize()){
DeviceMDData::easyMalloc (numThreadsInCell * totalNumCell);
}
numData() = totalNumCell * numThreadsInCell;
// printf ("rank %d, numcell %d\n", Parallel::Interface::myRank(), totalNumCell);
// getchar ();
// mallocCell (totalNumCell, maxNumNeighborCell);
easyMallocCell (totalNumCell);
initZeroCell ();
// One block per cell scatters the backed-up atoms into the cell layout.
IndexType numThreadBlock = numThreadsInCell;
dim3 gridDim = toGridDim(numCell.x*numCell.y*numCell.z);
hipLaunchKernelGGL(( Parallel::CudaGlobal::formCellStructure)
, dim3(gridDim), dim3(numThreadBlock) , 0, 0,
frameLow,
frameUp,
numCell,
numAtomInCell,
bkData.numData(),
bkData.dptr_coordinate(),
bkData.dptr_coordinateNoi(),
bkData.dptr_velocityX(),
bkData.dptr_velocityY(),
bkData.dptr_velocityZ(),
bkData.dptr_globalIndex(),
bkData.dptr_type(),
bkData.dptr_mass(),
bkData.dptr_charge(),
coord,
coordNoi,
velox,
veloy,
veloz,
globalIndex,
type,
mass,
charge,
err.ptr_de);
checkCUDAError ("DeviceCellListedMDData::formCellStructure");
err.updateHost();
err.check ("DeviceCellListedMDData::formCellSturcture");
}
// Re-derive the cell grid for new parameters (rlist_, devideLevel_).
// Returns false (after only recording rlist/devideLevel) when the new grid
// has the same per-axis cell counts, so the existing layout remains valid.
// Otherwise re-allocates the cell storage, remaps every atom and its bonded
// topology via a forward map, and returns true.
bool Parallel::DeviceCellListedMDData::
reinitCellStructure (const ScalorType & rlist_,
const IndexType & devideLevel_,
const BoxDirection_t & bdir)
{
IntVectorType tmpNumCell;
VectorType tmpFrameLow, tmpFrameUp;
calNumCell (rlist_, devideLevel_, bdir, tmpNumCell, tmpFrameLow, tmpFrameUp);
// Same decomposition: nothing to move.
if (tmpNumCell.x == numCell.x &&
tmpNumCell.y == numCell.y &&
tmpNumCell.z == numCell.z ){
rlist = rlist_;
devideLevel = devideLevel_;
return false;
}
// Full backup of all data items: the remap kernels read from bkData while
// writing into the re-allocated arrays of *this.
DeviceCellListedMDData bkData ;
bkData.copyFromDevice (*this, MDDataItemMask_All);
rlist = rlist_;
devideLevel = devideLevel_;
numCell = tmpNumCell;
frameLow = tmpFrameLow;
frameUp = tmpFrameUp;
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
// Grow the flat per-atom arrays (preserving bond/angle/dihedral capacity)
// if the new cell count needs more slots.
if (numThreadsInCell * totalNumCell > DeviceMDData::memSize()){
DeviceMDData::easyMalloc (numThreadsInCell * totalNumCell,
getMaxNumBond(), getMaxNumAngle(), getMaxNumDihedral());
}
numData() = totalNumCell * numThreadsInCell;
easyMallocCell (totalNumCell);
initZeroCell ();
IndexType numThreadBlock = numThreadsInCell;
// All kernels below are launched over the OLD cell grid (bkData):
// one block per old cell, one thread per old atom slot.
IntVectorType numCell_ = bkData.getNumCell();
dim3 gridDim = toGridDim(numCell_.x*numCell_.y*numCell_.z);
// forwardMap[oldFlatIndex] = new flat index of that atom after the remap.
IndexType * forwardMap;
hipMalloc ((void**)&forwardMap,
sizeof(IndexType) * numCell_.x * numCell_.y * numCell_.z * numThreadsInCell);
checkCUDAError ("DeviceCellListedMDData::reinitCellStructure malloc forwardMap");
// Pass 1: compute each atom's destination slot and the new per-cell counts.
hipLaunchKernelGGL(( Parallel::CudaGlobal::reinitCellStructure_calForwardMap)
, dim3(gridDim), dim3(numThreadBlock) , 0, 0,
bkData.dptr_numAtomInCell(),
bkData.dptr_coordinate(),
frameLow,
frameUp,
numCell,
numAtomInCell,
forwardMap,
err.ptr_de);
checkCUDAError ("DeviceCellListedMDData::reinitCellStructure_calForwardMap");
err.updateHost();
err.check ("DeviceCellListedMDData::reinitCellStructure_calForwardMap");
// Pass 2: move the per-atom physical data along the forward map.
hipLaunchKernelGGL(( Parallel::CudaGlobal::reinitCellStructure_step1)
, dim3(gridDim), dim3(numThreadBlock) , 0, 0,
bkData.dptr_numAtomInCell(),
bkData.dptr_coordinate(),
bkData.dptr_coordinateNoi(),
bkData.dptr_velocityX(),
bkData.dptr_velocityY(),
bkData.dptr_velocityZ(),
bkData.dptr_forceX(),
bkData.dptr_forceY(),
bkData.dptr_forceZ(),
bkData.dptr_globalIndex(),
bkData.dptr_type(),
bkData.dptr_mass(),
bkData.dptr_charge(),
forwardMap,
coord,
coordNoi,
velox,
veloy,
veloz,
forcx,
forcy,
forcz,
globalIndex,
type,
mass,
charge);
checkCUDAError ("DeviceCellListedMDData::reinitCellStructure_step1");
// Pass 3: move the per-atom bonded topology along the same map.
hipLaunchKernelGGL(( Parallel::CudaGlobal::reinitCellStructure_step2)
, dim3(gridDim), dim3(numThreadBlock), 0, 0,
bkData.dptr_numAtomInCell(),
bkData.getMaxNumBond(),
bkData.dptr_numBond(),
bkData.dptr_bondIndex(),
bkData.dptr_bondNeighbor_globalIndex(),
bkData.getMaxNumAngle(),
bkData.dptr_numAngle(),
bkData.dptr_angleIndex(),
bkData.dptr_anglePosi(),
bkData.dptr_angleNeighbor_globalIndex(),
bkData.getMaxNumDihedral(),
bkData.dptr_numDihedral(),
bkData.dptr_dihedralIndex(),
bkData.dptr_dihedralPosi(),
bkData.dptr_dihedralNeighbor_globalIndex(),
bkData.getBondTopStride(),
forwardMap,
dptr_numBond(),
dptr_bondIndex(),
dptr_bondNeighbor_globalIndex(),
dptr_numAngle(),
dptr_angleIndex(),
dptr_anglePosi(),
dptr_angleNeighbor_globalIndex(),
dptr_numDihedral(),
dptr_dihedralIndex(),
dptr_dihedralPosi(),
dptr_dihedralNeighbor_globalIndex(),
getBondTopStride());
hipFree (forwardMap);
return true;
}
void Parallel::DeviceCellListedMDData::
rebuild ()
{
  // Re-sort atoms into the cells matching their current coordinates.
  // Step 1 appends migrated atoms to their new cells (invalidating the old
  // slots); step 2 compacts each cell. When bonded topology is present,
  // forward maps recorded by the two steps are replayed to relocate the
  // per-atom bond/angle/dihedral tables as well.
  IndexType numThreadBlock = Parallel::Interface::numThreadsInCell();
  IndexType totalNumCell = numCell.x*numCell.y*numCell.z;
  dim3 gridDim = toGridDim(totalNumCell);
  // Backup of the pre-rebuild per-cell atom counts; step 1 must see the old
  // occupancy while it concurrently grows numAtomInCell via atomics.
  IndexType * bk_numAtomInCell;
  hipMalloc ((void**)&bk_numAtomInCell, totalNumCell * sizeof(IndexType));
  hipMemcpy (bk_numAtomInCell, numAtomInCell, totalNumCell * sizeof(IndexType),
             hipMemcpyDeviceToDevice);
  checkCUDAError ("DeviceCellListedMDData::rebuild malloc backup");
  if (getMaxNumBond() == 0 &&
      getMaxNumAngle() == 0 &&
      getMaxNumDihedral() == 0){
    // No bonded topology: plain two-step rebuild, no forward maps needed.
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        frameLow,
        frameUp,
        numCell,
        bk_numAtomInCell,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    // Step 2 uses 3 IndexType of dynamic shared memory per thread.
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2)
        , dim3(gridDim), dim3(numThreadBlock), numThreadBlock*sizeof(IndexType)*3, 0,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
  }
  else{
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        frameLow,
        frameUp,
        numCell,
        bk_numAtomInCell,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        forwardMap_step1,
        err.ptr_de,
        err.ptr_dindex,
        err.ptr_dscalor);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1_mapBondTop)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        forwardMap_step1,
        bk_numAtomInCell,
        getMaxNumBond(),
        dptr_numBond(),
        dptr_bondIndex(),
        dptr_bondNeighbor_globalIndex(),
        getMaxNumAngle(),
        dptr_numAngle(),
        dptr_angleIndex(),
        dptr_anglePosi(),
        dptr_angleNeighbor_globalIndex(),
        getMaxNumDihedral(),
        dptr_numDihedral(),
        dptr_dihedralIndex(),
        dptr_dihedralPosi(),
        // FIX: was dptr_angleNeighbor_globalIndex(), which left the
        // dihedral neighbor table unmapped.
        dptr_dihedralNeighbor_globalIndex(),
        bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step1");
    // Refresh the backup: step 2 compacts against the post-step-1 counts.
    hipMemcpy (bk_numAtomInCell, dptr_numAtomInCell(),
               totalNumCell * sizeof(IndexType), hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2)
        , dim3(gridDim), dim3(numThreadBlock), numThreadBlock*sizeof(IndexType)*3, 0,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        forwardMap_step2,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2_mapBondTop)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        forwardMap_step2,
        bk_numAtomInCell,
        getMaxNumBond(),
        dptr_numBond(),
        dptr_bondIndex(),
        dptr_bondNeighbor_globalIndex(),
        getMaxNumAngle(),
        dptr_numAngle(),
        dptr_angleIndex(),
        dptr_anglePosi(),
        dptr_angleNeighbor_globalIndex(),
        getMaxNumDihedral(),
        dptr_numDihedral(),
        dptr_dihedralIndex(),
        dptr_dihedralPosi(),
        // FIX: was dptr_angleNeighbor_globalIndex() (see step1 above).
        dptr_dihedralNeighbor_globalIndex(),
        bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step2");
  }
  hipFree (bk_numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::rebuild free backup");
}
void Parallel::DeviceCellListedMDData::
rebuild (DeviceBondList & dbdlist)
{
  // Same as rebuild(), but also keeps dbdlist's cached *local* neighbor
  // indices consistent while atoms move between cells.
  IndexType numThreadBlock = Parallel::Interface::numThreadsInCell();
  IndexType totalNumCell = numCell.x*numCell.y*numCell.z;
  dim3 gridDim = toGridDim(totalNumCell);
  // Backup of the pre-rebuild per-cell atom counts (step 1 reads the old
  // occupancy while atomically growing numAtomInCell).
  IndexType * bk_numAtomInCell;
  hipMalloc ((void**)&bk_numAtomInCell, totalNumCell * sizeof(IndexType));
  hipMemcpy (bk_numAtomInCell, numAtomInCell, totalNumCell * sizeof(IndexType),
             hipMemcpyDeviceToDevice);
  checkCUDAError ("DeviceCellListedMDData::rebuild malloc backup");
  if (getMaxNumBond() == 0 &&
      getMaxNumAngle() == 0 &&
      getMaxNumDihedral() == 0){
    // No bonded topology: plain two-step rebuild.
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        frameLow,
        frameUp,
        numCell,
        bk_numAtomInCell,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    // Step 2 uses 3 IndexType of dynamic shared memory per thread.
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2)
        , dim3(gridDim), dim3(numThreadBlock), numThreadBlock*sizeof(IndexType)*3, 0,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
  }
  else {
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        frameLow,
        frameUp,
        numCell,
        bk_numAtomInCell,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        forwardMap_step1,
        err.ptr_de,
        err.ptr_dindex,
        err.ptr_dscalor);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step1_mapBondTop)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        forwardMap_step1,
        bk_numAtomInCell,
        getMaxNumBond(),
        dptr_numBond(),
        dptr_bondIndex(),
        dptr_bondNeighbor_globalIndex(),
        dbdlist.dptr_bondNeighbor_localIndex(),
        getMaxNumAngle(),
        dptr_numAngle(),
        dptr_angleIndex(),
        dptr_anglePosi(),
        dptr_angleNeighbor_globalIndex(),
        dbdlist.dptr_angleNeighbor_localIndex(),
        getMaxNumDihedral(),
        dptr_numDihedral(),
        dptr_dihedralIndex(),
        dptr_dihedralPosi(),
        // FIX: was dptr_angleNeighbor_globalIndex(); the kernel expects the
        // dihedral neighbor table in this slot (see its signature).
        dptr_dihedralNeighbor_globalIndex(),
        dbdlist.dptr_dihedralNeighbor_localIndex(),
        bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step1");
    // Refresh the backup: step 2 compacts against the post-step-1 counts.
    hipMemcpy (bk_numAtomInCell, dptr_numAtomInCell(),
               totalNumCell * sizeof(IndexType), hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2)
        , dim3(gridDim), dim3(numThreadBlock), numThreadBlock*sizeof(IndexType)*3, 0,
        numAtomInCell,
        coord,
        coordNoi,
        velox,
        veloy,
        veloz,
        forcx,
        forcy,
        forcz,
        globalIndex,
        type,
        mass,
        charge,
        forwardMap_step2,
        err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
    hipLaunchKernelGGL(( Parallel::CudaGlobal::rebuildCellList_step2_mapBondTop)
        , dim3(gridDim), dim3(numThreadBlock), 0, 0,
        forwardMap_step2,
        bk_numAtomInCell,
        getMaxNumBond(),
        dptr_numBond(),
        dptr_bondIndex(),
        dptr_bondNeighbor_globalIndex(),
        dbdlist.dptr_bondNeighbor_localIndex(),
        getMaxNumAngle(),
        dptr_numAngle(),
        dptr_angleIndex(),
        dptr_anglePosi(),
        dptr_angleNeighbor_globalIndex(),
        dbdlist.dptr_angleNeighbor_localIndex(),
        getMaxNumDihedral(),
        dptr_numDihedral(),
        dptr_dihedralIndex(),
        dptr_dihedralPosi(),
        // FIX: was dptr_angleNeighbor_globalIndex() (see step1 above).
        dptr_dihedralNeighbor_globalIndex(),
        dbdlist.dptr_dihedralNeighbor_localIndex(),
        bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step2");
  }
  hipFree (bk_numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::rebuild free backup");
}
__global__ void Parallel::CudaGlobal::
initZeroCell (const IntVectorType numCell,
              IndexType * numAtomInCell)
{
  // Zero every per-cell atom counter; one thread clears one counter.
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType cellIdx = threadIdx.x + blockId * blockDim.x;
  const IndexType nCellTotal = numCell.x * numCell.y * numCell.z;
  if (cellIdx >= nCellTotal) return;
  numAtomInCell[cellIdx] = 0;
}
// Scatter numAtom atoms from the flat backup arrays (bk_*) into
// cell-ordered storage: each atom's cell is computed from its coordinate,
// a slot is claimed with atomicAdd on numAtomInCell, and the record is
// copied to slot + cellid * blockDim.x. One thread handles one source atom.
// Sets *ptr_de on a cell-index overflow or a full cell.
__global__ void Parallel::CudaGlobal::
formCellStructure (const VectorType frameLow,
const VectorType frameUp,
const IntVectorType numCell,
IndexType * numAtomInCell,
const IndexType numAtom,
const CoordType * bk_coord,
const CoordNoiType * bk_coordNoi,
const ScalorType * bk_velox,
const ScalorType * bk_veloy,
const ScalorType * bk_veloz,
const IndexType * bk_globalIndex,
const TypeType * bk_type,
const ScalorType * bk_mass,
const ScalorType * bk_charge,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
// Reciprocal cell edge lengths, computed once per block in shared memory.
// NOTE(review): threads 0, 1 and 2 each initialize one value, so this
// assumes blockDim.x >= 3 — confirm against numThreadsInCell().
__shared__ ScalorType dcellxi;
__shared__ ScalorType dcellyi;
__shared__ ScalorType dcellzi;
if (tid == 0) {
dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
}
if (tid == 1){
dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
}
if (tid == 2){
dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
}
__syncthreads();
IndexType targetIndex;
if (ii < numAtom){
// Map the coordinate to integer cell indices along each axis.
IndexType targetCellx, targetCelly, targetCellz;
targetCellx = IndexType((bk_coord[ii].x - frameLow.x) * dcellxi);
targetCelly = IndexType((bk_coord[ii].y - frameLow.y) * dcellyi);
targetCellz = IndexType((bk_coord[ii].z - frameLow.z) * dcellzi);
// if (targetCellx == numCell.x){
// targetCellx = numCell.x - 1;
// }
// if (targetCelly == numCell.y){
// targetCelly = numCell.y - 1;
// }
// if (targetCellz == numCell.z){
// targetCellz = numCell.z - 1;
// }
// Atom outside the cell grid: report and drop it.
if (ptr_de != NULL &&
(targetCellx >= numCell.x ||
targetCelly >= numCell.y ||
targetCellz >= numCell.z)){
*ptr_de = mdErrorOverFlowCellIdx;
return;
}
IndexType cellid = CudaDevice::D3toD1
(numCell, targetCellx, targetCelly, targetCellz);
// Claim the next free slot in the destination cell.
IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// Cell full: flag the error and overwrite slot 0 rather than going OOB.
// NOTE(review): unlike the overflow check above, this write to *ptr_de is
// not guarded against ptr_de == NULL — confirm callers always pass it.
if (pid >= blockDim.x){
*ptr_de = mdErrorShortCellList;
pid = 0;
}
targetIndex = pid + cellid * blockDim.x;
coord[targetIndex] = bk_coord[ii];
coordNoi[targetIndex].x = bk_coordNoi[ii].x;
coordNoi[targetIndex].y = bk_coordNoi[ii].y;
coordNoi[targetIndex].z = bk_coordNoi[ii].z;
velox[targetIndex] = bk_velox[ii];
veloy[targetIndex] = bk_veloy[ii];
veloz[targetIndex] = bk_veloz[ii];
globalIndex[targetIndex] = bk_globalIndex[ii];
type[targetIndex] = bk_type[ii];
mass[targetIndex] = bk_mass[ii];
charge[targetIndex] = bk_charge[ii];
}
}
// First pass of the cell-structure remap: for every atom in the OLD layout
// (one block per old cell, one thread per old slot), compute its cell in
// the NEW grid, claim a slot there via atomicAdd on numAtomInCell, and
// record the destination flat index in forwardMap[oldIndex]. Entries for
// empty slots are left unwritten. Sets *ptr_de on overflow / full cell.
__global__ void Parallel::CudaGlobal::
reinitCellStructure_calForwardMap (const IndexType * bk_numAtomInCell,
const CoordType * bk_coord,
const VectorType frameLow,
const VectorType frameUp,
const IntVectorType numCell,
IndexType * numAtomInCell,
IndexType * forwardMap,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
IndexType this_numAtom = bk_numAtomInCell[bid];
// Whole old cell empty: nothing to map (uniform per block, safe return).
if (this_numAtom == 0) return;
// Reciprocal cell edge lengths of the NEW grid (per-thread registers here,
// unlike the shared-memory variant in formCellStructure).
ScalorType dcellxi;
ScalorType dcellyi;
ScalorType dcellzi;
dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
if (tid < this_numAtom){
IndexType targetCellx, targetCelly, targetCellz;
targetCellx = IndexType((bk_coord[ii].x - frameLow.x) * dcellxi);
targetCelly = IndexType((bk_coord[ii].y - frameLow.y) * dcellyi);
targetCellz = IndexType((bk_coord[ii].z - frameLow.z) * dcellzi);
if (ptr_de != NULL &&
(targetCellx >= numCell.x ||
targetCelly >= numCell.y ||
targetCellz >= numCell.z)){
*ptr_de = mdErrorOverFlowCellIdx;
return;
}
IndexType cellid = CudaDevice::D3toD1
(numCell, targetCellx, targetCelly, targetCellz);
// Claim the next free slot in the destination cell of the new grid.
IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// NOTE(review): this *ptr_de write is not NULL-guarded, unlike the
// overflow check above — confirm callers always pass a valid pointer.
if (pid >= blockDim.x){
*ptr_de = mdErrorShortCellList;
pid = 0;
}
IndexType targetIndex = pid + cellid * blockDim.x;
forwardMap[ii] = targetIndex;
}
}
__global__ void Parallel::CudaGlobal::
reinitCellStructure_step1 (const IndexType * bk_numAtomInCell,
                           const CoordType * bk_coord,
                           const CoordNoiType * bk_coordNoi,
                           const ScalorType * bk_velox,
                           const ScalorType * bk_veloy,
                           const ScalorType * bk_veloz,
                           const ScalorType * bk_forcx,
                           const ScalorType * bk_forcy,
                           const ScalorType * bk_forcz,
                           const IndexType * bk_globalIndex,
                           const TypeType * bk_type,
                           const ScalorType * bk_mass,
                           const ScalorType * bk_charge,
                           const IndexType * forwardMap,
                           CoordType * coord,
                           CoordNoiType * coordNoi,
                           ScalorType * velox,
                           ScalorType * veloy,
                           ScalorType * veloz,
                           ScalorType * forcx,
                           ScalorType * forcy,
                           ScalorType * forcz,
                           IndexType * globalIndex,
                           TypeType * type,
                           ScalorType * mass,
                           ScalorType * charge)
{
  // Second pass of the cell-structure remap: one block per OLD cell, one
  // thread per old slot. Each occupied slot's full atom record is copied
  // from old flat index `from` to its new slot forwardMap[from], which was
  // filled in by reinitCellStructure_calForwardMap.
  const IndexType cellId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType slot = threadIdx.x;
  const IndexType from = slot + cellId * blockDim.x;
  const IndexType occupancy = bk_numAtomInCell[cellId];
  if (occupancy == 0) return;          // empty source cell
  if (slot >= occupancy) return;       // slot beyond the occupied range
  const IndexType to = forwardMap[from];
  coord[to] = bk_coord[from];
  coordNoi[to].x = bk_coordNoi[from].x;
  coordNoi[to].y = bk_coordNoi[from].y;
  coordNoi[to].z = bk_coordNoi[from].z;
  velox[to] = bk_velox[from];
  veloy[to] = bk_veloy[from];
  veloz[to] = bk_veloz[from];
  forcx[to] = bk_forcx[from];
  forcy[to] = bk_forcy[from];
  forcz[to] = bk_forcz[from];
  globalIndex[to] = bk_globalIndex[from];
  type[to] = bk_type[from];
  mass[to] = bk_mass[from];
  charge[to] = bk_charge[from];
}
// Third pass of the cell-structure remap: move the per-atom bonded
// topology (bonds, angles, dihedrals) from the old layout to the new one,
// following forwardMap. One block per OLD cell, one thread per old slot.
// Topology tables are strided: entry j of atom i lives at i + j*stride.
__global__ void Parallel::CudaGlobal::
reinitCellStructure_step2 (const IndexType * bk_numAtomInCell,
                           const IndexType bk_maxNumBond,
                           const IndexType * bk_numBond,
                           const IndexType * bk_bondIndex,
                           const IndexType * bk_bondNeighbor_globalIndex,
                           const IndexType bk_maxNumAngle,
                           const IndexType * bk_numAngle,
                           const IndexType * bk_angleIndex,
                           const IndexType * bk_anglePosi,
                           const IndexType * bk_angleNeighbor_globalIndex,
                           const IndexType bk_maxNumDihedral,
                           const IndexType * bk_numDihedral,
                           const IndexType * bk_dihedralIndex,
                           const IndexType * bk_dihedralPosi,
                           const IndexType * bk_dihedralNeighbor_globalIndex,
                           const IndexType bk_bondTopStride,
                           const IndexType * forwardMap,
                           IndexType * numBond,
                           IndexType * bondIndex,
                           IndexType * bondNeighbor_globalIndex,
                           IndexType * numAngle,
                           IndexType * angleIndex,
                           IndexType * anglePosi,
                           IndexType * angleNeighbor_globalIndex,
                           IndexType * numDihedral,
                           IndexType * dihedralIndex,
                           IndexType * dihedralPosi,
                           IndexType * dihedralNeighbor_globalIndex,
                           const IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  IndexType this_numAtom = bk_numAtomInCell[bid];
  if (this_numAtom == 0) return;
  if (tid < this_numAtom){
    IndexType my_targetIndex = forwardMap[ii];
    // Bonds: one neighbor entry per bond.
    if (bk_maxNumBond != 0){
      IndexType tmpNum = numBond[my_targetIndex] = bk_numBond[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
        bondIndex[tmp_target] = bk_bondIndex[tmp_src];
        bondNeighbor_globalIndex[tmp_target] = bk_bondNeighbor_globalIndex[tmp_src];
        tmp_target += bondTopStride;
        tmp_src += bk_bondTopStride;
      }
    }
    // Angles: index/posi per angle, plus the angle neighbor table.
    if (bk_maxNumAngle != 0){
      IndexType tmpNum = numAngle[my_targetIndex] = bk_numAngle[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
        angleIndex[tmp_target] = bk_angleIndex[tmp_src];
        anglePosi [tmp_target] = bk_anglePosi [tmp_src];
        tmp_target += bondTopStride;
        tmp_src += bk_bondTopStride;
      }
      tmp_target = my_targetIndex;
      tmp_src = ii;
      // FIX: copy the ANGLE neighbor table; the original copied
      // bondNeighbor_globalIndex here, leaving angleNeighbor_globalIndex
      // (and its bk_ source) entirely unused.
      for (IndexType jj = 0; jj < tmpNum * 2; ++jj){
        angleNeighbor_globalIndex[tmp_target] = bk_angleNeighbor_globalIndex[tmp_src];
        tmp_target += bondTopStride;
        tmp_src += bk_bondTopStride;
      }
    }
    // Dihedrals: index/posi per dihedral, plus the dihedral neighbor table.
    if (bk_maxNumDihedral != 0){
      IndexType tmpNum = numDihedral[my_targetIndex] = bk_numDihedral[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
        dihedralIndex[tmp_target] = bk_dihedralIndex[tmp_src];
        dihedralPosi [tmp_target] = bk_dihedralPosi [tmp_src];
        tmp_target += bondTopStride;
        tmp_src += bk_bondTopStride;
      }
      tmp_target = my_targetIndex;
      tmp_src = ii;
      // FIX: copy the DIHEDRAL neighbor table (original wrote to the bond
      // table). NOTE(review): the tmpNum * 2 bound is kept from the
      // original; a dihedral typically has 3 neighbors per entry, so this
      // may need to be tmpNum * 3 — confirm the dihedral table layout.
      for (IndexType jj = 0; jj < tmpNum * 2; ++jj){
        dihedralNeighbor_globalIndex[tmp_target] = bk_dihedralNeighbor_globalIndex[tmp_src];
        tmp_target += bondTopStride;
        tmp_src += bk_bondTopStride;
      }
    }
  }
}
// Rebuild step 1 (no forward map): one block per cell, one thread per slot.
// Each occupied slot whose atom now belongs to a different cell appends a
// copy of the atom there (slot claimed with atomicAdd, starting after the
// destination cell's pre-existing atoms) and marks the old slot invalid by
// setting its globalIndex to MaxIndexValue. Reads only touch slots below
// bk_numAtomInCell[bid] and appends land at or above that count, so reads
// and concurrent writes do not overlap. Step 2 compacts afterwards.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1 (const VectorType frameLow,
const VectorType frameUp,
const IntVectorType numCell,
const IndexType * bk_numAtomInCell,
IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
// Reciprocal cell edge lengths, shared per block.
// NOTE(review): threads 0, 1 and 2 each set one value — assumes
// blockDim.x >= 3; confirm against numThreadsInCell().
__shared__ ScalorType dcellxi;
__shared__ ScalorType dcellyi;
__shared__ ScalorType dcellzi;
if (tid == 0) {
dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
}
if (tid == 1){
dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
}
if (tid == 2){
dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
}
__syncthreads();
// IndexType mark = MaxIndexValue - (MaxIndexValue >> 1);
// IndexType mystat ;
if (tid < bk_numAtomInCell[bid]){
// mystat = globalIndex[ii];
// Cell the atom belongs to now, from its current coordinate.
IndexType targetCellx, targetCelly, targetCellz;
targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
// printf ("%d %d %d %d %f %f %f\n", ii, targetCellx, targetCelly, targetCellz,
// coord[ii].x, coord[ii].y, coord[ii].z);
// if (targetCellx == numCell.x){
// targetCellx = numCell.x - 1;
// }
// if (targetCelly == numCell.y){
// targetCelly = numCell.y - 1;
// }
// if (targetCellz == numCell.z){
// targetCellz = numCell.z - 1;
// }
if (ptr_de != NULL &&
(targetCellx >= numCell.x ||
targetCelly >= numCell.y ||
targetCellz >= numCell.z)){
*ptr_de = mdErrorOverFlowCellIdx;
return;
}
IndexType cellid = CudaDevice::D3toD1
(numCell, targetCellx, targetCelly, targetCellz);
// Only atoms that changed cell need to move.
if (cellid != bid){
// IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// if (pid >= blockDim.x){
// *ptr_de = mdErrorShortCellList;
// pid = 0;
// }
IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// NOTE(review): *ptr_de write below is not NULL-guarded, unlike the
// overflow check above — confirm callers always pass a valid pointer.
if (pid >= blockDim.x){
*ptr_de = mdErrorShortCellList;
pid = 0;
}
IndexType targetIndex = pid + cellid * blockDim.x;
coord[targetIndex] = coord[ii];
coordNoi[targetIndex].x = coordNoi[ii].x;
coordNoi[targetIndex].y = coordNoi[ii].y;
coordNoi[targetIndex].z = coordNoi[ii].z;
velox[targetIndex] = velox[ii];
veloy[targetIndex] = veloy[ii];
veloz[targetIndex] = veloz[ii];
forcx[targetIndex] = forcx[ii];
forcy[targetIndex] = forcy[ii];
forcz[targetIndex] = forcz[ii];
globalIndex[targetIndex] = globalIndex[ii];
// Invalidate the vacated slot; step 2 filters on MaxIndexValue.
globalIndex[ii] = MaxIndexValue;
type[targetIndex] = type[ii];
mass[targetIndex] = mass[ii];
charge[targetIndex] = charge[ii];
}
}
// globalIndex[ii] = mystat;
return;
}
// Block-level one-bit partition: reorders index[0..blockDim.x) so entries
// whose top bit (bit NUintBit-1) is clear — i.e. valid slot indices — come
// first and entries with the bit set (MaxIndexValue markers) go to the
// back. Returns the number of entries with the bit set. sbuff must provide
// at least 2*blockDim.x IndexType of shared scratch; all blockDim.x threads
// must call this (it contains __syncthreads()).
// NOTE(review): the target arithmetic relies on the exact buffer contents
// left by sumVectorBlockBuffer (defined elsewhere) — verify against it.
static __device__ IndexType
headSort (volatile IndexType * index,
volatile IndexType * sbuff)
{
IndexType k = NUintBit - 1;
IndexType tid = threadIdx.x;
// Flag per entry: 1 if the top bit is set, 0 otherwise.
sbuff[tid] = getKthBit(index[tid], k);
sbuff[tid+blockDim.x] = 0;
__syncthreads();
// Block-wide sum of the flags (also fills sbuff with partial sums).
IndexType total1 = sumVectorBlockBuffer (sbuff, blockDim.x);
// Save own value before any thread overwrites index[].
IndexType target, mydata = index[tid];
__syncthreads();
if (getKthBit(index[tid], k)) {
// Flagged entries are placed in the tail region.
target = blockDim.x - sbuff[tid];
}
else {
// Unflagged entries are shifted forward past the flagged ones.
target = tid + sbuff[tid] - total1;
}
__syncthreads();
index[target] = mydata;
__syncthreads();
return total1;
}
// Rebuild step 2 (no forward map): compact each cell so that the valid
// atoms (slot occupied and globalIndex != MaxIndexValue after step 1's
// migrations) occupy the leading slots, then store the new count in
// numAtomInCell[bid]. Requires 3*blockDim.x IndexType of dynamic shared
// memory (1x for the index array, 2x scratch for headSort).
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2 (IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
// IndexType k = NUintBit - 1;
extern __shared__ volatile IndexType sbuff[];
volatile IndexType * myIndex = (volatile IndexType * )sbuff;
// Invalid slots get MaxIndexValue (top bit set) so headSort pushes them
// to the back; valid slots keep their local slot index.
if (tid >= numAtomInCell[bid] || globalIndex[ii] == MaxIndexValue){
myIndex[tid] = MaxIndexValue;
}
else {
myIndex[tid] = tid;
}
__syncthreads();
// headSort returns the invalid count; total = number of valid atoms.
IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
total = blockDim.x - total;
IndexType fromId;
CoordType bk_coord;
CoordNoiType bk_coordNoi;
ScalorType bk_velox, bk_veloy, bk_veloz;
ScalorType bk_forcx, bk_forcy, bk_forcz;
IndexType bk_globalIndex;
TypeType bk_type;
ScalorType bk_mass;
ScalorType bk_charge;
// Read phase: stage the record that must move into registers.
// (fromId is only read below when tid < total, so it is never used
// uninitialized — the && short-circuits first.)
if (tid < total){
fromId = myIndex[tid] + bid * blockDim.x;
if (ii != fromId){
bk_coord = coord[fromId];
bk_coordNoi.x = coordNoi[fromId].x;
bk_coordNoi.y = coordNoi[fromId].y;
bk_coordNoi.z = coordNoi[fromId].z;
bk_velox = velox[fromId];
bk_veloy = veloy[fromId];
bk_veloz = veloz[fromId];
bk_forcx = forcx[fromId];
bk_forcy = forcy[fromId];
bk_forcz = forcz[fromId];
bk_globalIndex = globalIndex[fromId];
bk_type = type[fromId];
bk_mass = mass[fromId];
bk_charge = charge[fromId];
}
}
// Barrier separates all reads from the writes that overwrite those slots.
__syncthreads();
if (tid < total && ii != fromId){
coord[ii] = bk_coord;
coordNoi[ii].x = bk_coordNoi.x;
coordNoi[ii].y = bk_coordNoi.y;
coordNoi[ii].z = bk_coordNoi.z;
velox[ii] = bk_velox;
veloy[ii] = bk_veloy;
veloz[ii] = bk_veloz;
forcx[ii] = bk_forcx;
forcy[ii] = bk_forcy;
forcz[ii] = bk_forcz;
globalIndex[ii] = bk_globalIndex;
type[ii] = bk_type;
mass[ii] = bk_mass;
charge[ii] = bk_charge;
}
// else {
// globalIndex[ii] = MaxIndexValue;
// }
if (tid == 0){
numAtomInCell[bid] = total;
}
}
// Rebuild step 1 (forward-map variant, used when bonded topology exists).
// Like the plain overload, but also records forwardMap[oldIndex] =
// newFlatIndex for every valid atom (MaxIndexValue for empty slots), and on
// a cell-index overflow dumps diagnostics into erridx/errsrc (offending
// cell index; coordinate, axis id, frame origin, reciprocal cell size and
// cell count for the failing axis).
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1 (const VectorType frameLow,
const VectorType frameUp,
const IntVectorType numCell,
const IndexType * bk_numAtomInCell,
IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
IndexType * forwardMap,
mdError_t * ptr_de,
IndexType * erridx,
ScalorType * errsrc)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
// Reciprocal cell edge lengths; here thread 0 alone initializes all three
// (unlike the other overload, which spreads them over threads 0..2).
__shared__ ScalorType dcellxi;
__shared__ ScalorType dcellyi;
__shared__ ScalorType dcellzi;
if (tid == 0) {
dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
}
// Default: slot holds no atom (every thread initializes its own entry).
forwardMap[ii] = MaxIndexValue;
__syncthreads();
bool inRange = tid < bk_numAtomInCell[bid];
if (inRange){
IndexType targetCellx, targetCelly, targetCellz;
targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
if (ptr_de != NULL &&
(targetCellx >= numCell.x ||
targetCelly >= numCell.y ||
targetCellz >= numCell.z)){
*ptr_de = mdErrorOverFlowCellIdx;
// Diagnostic dump for the failing axis (if several overflow, the later
// axis wins).
if (targetCellx >= numCell.x){
*erridx = targetCellx;
*errsrc = coord[ii].x;
errsrc[1] = 0;
errsrc[2] = frameLow.x;
errsrc[3] = dcellxi;
errsrc[4] = numCell.x;
}
if (targetCelly >= numCell.y) {
*erridx = targetCelly;
*errsrc = coord[ii].y;
errsrc[1] = 1;
errsrc[2] = frameLow.y;
errsrc[3] = dcellyi;
errsrc[4] = numCell.y;
}
if (targetCellz >= numCell.z) {
*erridx = targetCellz;
*errsrc = coord[ii].z;
errsrc[1] = 2;
errsrc[2] = frameLow.z;
errsrc[3] = dcellzi;
errsrc[4] = numCell.z;
}
return;
}
IndexType cellid = CudaDevice::D3toD1
(numCell, targetCellx, targetCelly, targetCellz);
// Valid atom that stays put maps to itself ...
forwardMap[ii] = ii;
if (cellid != bid){
// ... while a migrating atom is appended to its new cell.
IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// NOTE(review): *ptr_de write below is not NULL-guarded, unlike the
// overflow check above — confirm callers always pass a valid pointer.
if (pid >= blockDim.x){
*ptr_de = mdErrorShortCellList;
pid = 0;
}
IndexType targetIndex = pid + cellid * blockDim.x;
coord[targetIndex] = coord[ii];
coordNoi[targetIndex].x = coordNoi[ii].x;
coordNoi[targetIndex].y = coordNoi[ii].y;
coordNoi[targetIndex].z = coordNoi[ii].z;
velox[targetIndex] = velox[ii];
veloy[targetIndex] = veloy[ii];
veloz[targetIndex] = veloz[ii];
forcx[targetIndex] = forcx[ii];
forcy[targetIndex] = forcy[ii];
forcz[targetIndex] = forcz[ii];
globalIndex[targetIndex] = globalIndex[ii];
// Invalidate the vacated slot; step 2 filters on MaxIndexValue.
globalIndex[ii] = MaxIndexValue;
type[targetIndex] = type[ii];
mass[targetIndex] = mass[ii];
charge[targetIndex] = charge[ii];
forwardMap[ii] = targetIndex;
}
}
return;
}
// Rebuild step 2 (forward-map variant): compact each cell like the plain
// overload, additionally recording forwardMap[oldIndex] = newIndex for
// every atom that is kept (MaxIndexValue otherwise), so the subsequent
// step2_mapBondTop kernel can relocate the bonded-topology tables.
// Requires 3*blockDim.x IndexType of dynamic shared memory.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2 (IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
IndexType * forwardMap,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
// IndexType k = NUintBit - 1;
IndexType this_numAtomInCell = numAtomInCell[bid];
// Identity map for occupied slots, MaxIndexValue for the rest.
if (tid < this_numAtomInCell) {
forwardMap[ii] = ii;
}
else {
forwardMap[ii] = MaxIndexValue;
}
__syncthreads();
// Uniform per block (every thread read the same counter), so this early
// return cannot split the block across the barriers below.
if (this_numAtomInCell == 0) return;
extern __shared__ volatile IndexType sbuff[];
volatile IndexType * myIndex = (volatile IndexType * )sbuff;
// Invalid slots (beyond the count, or vacated by step 1) get MaxIndexValue
// so headSort pushes them to the back.
if (tid >= this_numAtomInCell || globalIndex[ii] == MaxIndexValue){
myIndex[tid] = MaxIndexValue;
}
else {
myIndex[tid] = tid;
}
__syncthreads();
// headSort returns the invalid count; total = number of kept atoms.
IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
total = blockDim.x - total;
IndexType fromId;
CoordType bk_coord;
CoordNoiType bk_coordNoi;
ScalorType bk_velox, bk_veloy, bk_veloz;
ScalorType bk_forcx, bk_forcy, bk_forcz;
IndexType bk_globalIndex;
TypeType bk_type;
ScalorType bk_mass;
ScalorType bk_charge;
__syncthreads();
// Read phase: stage the record that must move into registers and note the
// compaction in forwardMap. (fromId is only read below when tid < total,
// so it is never used uninitialized — the && short-circuits first.)
if (tid < total){
fromId = myIndex[tid] + bid * blockDim.x;
if (ii != fromId){
bk_coord = coord[fromId];
bk_coordNoi.x = coordNoi[fromId].x;
bk_coordNoi.y = coordNoi[fromId].y;
bk_coordNoi.z = coordNoi[fromId].z;
bk_velox = velox[fromId];
bk_veloy = veloy[fromId];
bk_veloz = veloz[fromId];
bk_forcx = forcx[fromId];
bk_forcy = forcy[fromId];
bk_forcz = forcz[fromId];
bk_globalIndex = globalIndex[fromId];
bk_type = type[fromId];
bk_mass = mass[fromId];
bk_charge = charge[fromId];
forwardMap[fromId] = ii;
}
}
// Barrier separates all reads from the writes that overwrite those slots.
__syncthreads();
if (tid < total && ii != fromId){
coord[ii] = bk_coord;
coordNoi[ii].x = bk_coordNoi.x;
coordNoi[ii].y = bk_coordNoi.y;
coordNoi[ii].z = bk_coordNoi.z;
velox[ii] = bk_velox;
veloy[ii] = bk_veloy;
veloz[ii] = bk_veloz;
forcx[ii] = bk_forcx;
forcy[ii] = bk_forcy;
forcz[ii] = bk_forcz;
globalIndex[ii] = bk_globalIndex;
type[ii] = bk_type;
mass[ii] = bk_mass;
charge[ii] = bk_charge;
}
if (tid == 0){
numAtomInCell[bid] = total;
}
}
// Step 1 of remapping the bonded topology (bond / angle / dihedral tables,
// including cell-local neighbor indices) after atoms were compacted inside
// their cells.
// Launch layout: one block per cell, one thread per atom slot
// (cell c owns slots [c*blockDim.x, (c+1)*blockDim.x)).
// forwardMap[i] is the new slot of the atom that used to occupy slot i,
// or MaxIndexValue if slot i held no atom.
// Topology arrays are strided: entry k of atom a sits at
// a + k * bondTopStride (the same stride is shared by the bond, angle and
// dihedral tables).
// NOTE(review): rows are moved in place, so a thread whose toIndex equals
// another thread's fromIndex can race; presumably the caller uses the
// step2 variant (register-buffered, barrier-separated) when such overlaps
// are possible -- confirm at the call site.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
const IndexType * bk_numAtomInCell,
IndexType maxNumBond,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType * bondNeighbor_localIndex,
IndexType maxNumAngle,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi,
IndexType * angleNeighbor_globalIndex,
IndexType * angleNeighbor_localIndex,
IndexType maxNumDihedral,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi,
IndexType * dihedralNeighbor_globalIndex,
IndexType * dihedralNeighbor_localIndex,
IndexType bondTopStride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// pre-compaction atom count of this cell (bk_ = backup copy); uniform
// across the block, so the early return below cannot diverge
IndexType this_numAtomInCell = bk_numAtomInCell[bid];
if (this_numAtomInCell == 0) return;
IndexType fromIndex = tid + bid * blockDim.x;
IndexType toIndex = forwardMap[fromIndex];
if (maxNumBond != 0){
IndexType my_numBond;
if (tid < this_numAtomInCell){
my_numBond = (numBond[fromIndex]);
// pass 1: translate each bond neighbor's local index through
// forwardMap (entries mapping to MaxIndexValue keep their old value)
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numBond; ++i){
IndexType tmp0 = bondNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
bondNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
// all neighbor indices must be rewritten before any row is moved;
// maxNumBond is a kernel argument, so this barrier is block-uniform
__syncthreads();
// pass 2: move this atom's bond row to its new slot.  A valid toIndex
// appears to imply tid < this_numAtomInCell (the compacting kernel
// fills forwardMap with MaxIndexValue for empty slots), so my_numBond
// should be initialized on this path -- TODO confirm.
if (toIndex != MaxIndexValue && toIndex != fromIndex){
numBond[toIndex] = my_numBond;
numBond[fromIndex] = 0;
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType i = 0; i < my_numBond; ++i){
bondIndex[my_toIndex] = bondIndex[my_fromIndex];
bondNeighbor_globalIndex[my_toIndex] =
bondNeighbor_globalIndex[my_fromIndex];
bondNeighbor_localIndex [my_toIndex] =
bondNeighbor_localIndex [my_fromIndex];
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// angles: same two-pass scheme; each angle stores 2 neighbor entries
if (maxNumAngle != 0){
IndexType my_numAngle;
if (tid < this_numAtomInCell){
my_numAngle = (numAngle[fromIndex]);
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numAngle * 2; ++i){
IndexType tmp0 = angleNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
angleNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
__syncthreads();
if (toIndex != MaxIndexValue && toIndex != fromIndex){
numAngle[toIndex] = my_numAngle;
numAngle[fromIndex] = 0;
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType i = 0; i < my_numAngle; ++i){
angleIndex[my_toIndex] = angleIndex[my_fromIndex];
anglePosi [my_toIndex] = anglePosi [my_fromIndex];
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType i = 0; i < my_numAngle * 2; ++i){
angleNeighbor_globalIndex[my_toIndex] =
angleNeighbor_globalIndex[my_fromIndex];
angleNeighbor_localIndex [my_toIndex] =
angleNeighbor_localIndex [my_fromIndex];
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// dihedrals: same two-pass scheme; each dihedral stores 3 neighbor entries
if (maxNumDihedral != 0){
IndexType my_numDihedral;
if (tid < this_numAtomInCell){
my_numDihedral = (numDihedral[fromIndex]);
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numDihedral * 3; ++i){
IndexType tmp0 = dihedralNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
dihedralNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
__syncthreads();
if (toIndex != MaxIndexValue && toIndex != fromIndex){
numDihedral[toIndex] = my_numDihedral;
numDihedral[fromIndex] = 0;
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType i = 0; i < my_numDihedral; ++i){
dihedralIndex[my_toIndex] = dihedralIndex[my_fromIndex];
dihedralPosi [my_toIndex] = dihedralPosi [my_fromIndex];
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType i = 0; i < my_numDihedral * 3; ++i){
dihedralNeighbor_globalIndex[my_toIndex] =
dihedralNeighbor_globalIndex[my_fromIndex];
dihedralNeighbor_localIndex [my_toIndex] =
dihedralNeighbor_localIndex [my_fromIndex];
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// if (toIndex != MaxIndexValue){
//   if (maxNumBond != 0){
//     IndexType my_numBond = (numBond[toIndex] = numBond[fromIndex]);
//     IndexType my_fromIndex = fromIndex;
//     IndexType my_toIndex   = toIndex;
//     for (IndexType i = 0; i < my_numBond; ++i){
// 	bondIndex[my_toIndex] = bondIndex[my_fromIndex];
// 	bondNeighbor_globalIndex[my_toIndex] =
// 	    bondNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = bondNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  bondNeighbor_localIndex [my_toIndex] = tmpIndex0;
// 	}
// 	else {
// 	  bondNeighbor_localIndex [my_toIndex] = tmpIndex1;
// 	}
// 	my_fromIndex += bondTopStride;
// 	my_toIndex   += bondTopStride;
//     }
//   }
//   if (maxNumAngle != 0){
//     IndexType my_numAngle = (numAngle[toIndex] = numAngle[fromIndex]);
//     IndexType my_fromIndex = fromIndex;
//     IndexType my_toIndex   = toIndex;
//     for (IndexType i = 0; i < my_numAngle; ++i){
// 	angleIndex[my_toIndex] = angleIndex[my_fromIndex];
// 	anglePosi [my_toIndex] = anglePosi [my_fromIndex];
// 	my_fromIndex += bondTopStride;
// 	my_toIndex   += bondTopStride;
//     }
//     my_fromIndex = fromIndex;
//     my_toIndex   = toIndex;
//     for (IndexType i = 0; i < my_numAngle * 2; ++i){
// 	angleNeighbor_globalIndex[my_toIndex] =
// 	    angleNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = angleNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  angleNeighbor_localIndex [my_toIndex] = tmpIndex0;
// 	}
// 	else {
// 	  angleNeighbor_localIndex [my_toIndex] = tmpIndex1;
// 	}
// 	my_fromIndex += bondTopStride;
// 	my_toIndex   += bondTopStride;
//     }
//   }
//   if (maxNumDihedral != 0){
//     IndexType my_numDihedral = (numDihedral[toIndex] = numDihedral[fromIndex]);
//     IndexType my_fromIndex = fromIndex;
//     IndexType my_toIndex   = toIndex;
//     for (IndexType i = 0; i < my_numDihedral; ++i){
// 	dihedralIndex[my_toIndex] = dihedralIndex[my_fromIndex];
// 	dihedralPosi [my_toIndex] = dihedralPosi [my_fromIndex];
// 	my_fromIndex += bondTopStride;
// 	my_toIndex   += bondTopStride;
//     }
//     my_fromIndex = fromIndex;
//     my_toIndex   = toIndex;
//     for (IndexType i = 0; i < my_numDihedral * 3; ++i){
// 	dihedralNeighbor_globalIndex[my_toIndex] =
// 	    dihedralNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = dihedralNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  dihedralNeighbor_localIndex [my_toIndex] = tmpIndex0;
// 	}
// 	else {
// 	  dihedralNeighbor_localIndex [my_toIndex] = tmpIndex1;
// 	}
// 	my_fromIndex += bondTopStride;
// 	my_toIndex   += bondTopStride;
//     }
//   }
// }
}
// Step 2 of the bonded-topology remap (variant that also carries cell-local
// neighbor indices).  Unlike step1, every row move is double-buffered: each
// thread loads one entry into registers, the whole block hits a barrier, and
// only then are the entries stored.  This prevents a thread from overwriting
// a row that another thread in the same block has not yet read (possible
// because one thread's toIndex may equal another thread's fromIndex).
// Launch layout: one block per cell, one thread per atom slot; topology
// arrays are strided by bondTopStride (shared by bond, angle and dihedral
// tables), entry k of atom a at a + k * bondTopStride.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
const IndexType * bk_numAtomInCell,
IndexType maxNumBond,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType * bondNeighbor_localIndex,
IndexType maxNumAngle,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi,
IndexType * angleNeighbor_globalIndex,
IndexType * angleNeighbor_localIndex,
IndexType maxNumDihedral,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi,
IndexType * dihedralNeighbor_globalIndex,
IndexType * dihedralNeighbor_localIndex,
IndexType bondTopStride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// pre-compaction atom count (uniform per block: early return cannot diverge)
IndexType this_numAtomInCell = bk_numAtomInCell[bid];
if (this_numAtomInCell == 0) return;
IndexType fromIndex = tid + bid * blockDim.x;
IndexType toIndex = forwardMap[fromIndex];
// docopy: this slot's row must move.  Threads with docopy == false still
// execute every __syncthreads() below, so the barriers never diverge.
bool docopy = (toIndex != MaxIndexValue && fromIndex != toIndex);
IndexType bk_num, bk_Index, bk_Posi;
IndexType bk_Neighbor_globalIndex, bk_Neighbor_localIndex;
if (maxNumBond != 0){
IndexType my_numBond;
if (tid < this_numAtomInCell){
my_numBond = (numBond[fromIndex]);
// pass 1: translate bond neighbors' local indices through forwardMap
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numBond; ++i){
IndexType tmp0 = bondNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
bondNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
__syncthreads();
// NOTE(review): this inner test is redundant (we are already inside
// maxNumBond != 0) but harmless.
if (maxNumBond != 0){
// docopy appears to imply tid < this_numAtomInCell (the compacting
// kernel marks empty slots MaxIndexValue in forwardMap), so
// my_numBond should be initialized here -- TODO confirm.
if (docopy){
bk_num = my_numBond;
}
__syncthreads();
if (docopy){
numBond[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
// loop over the full capacity (maxNumBond), not my_numBond, so every
// thread runs the same trip count and the barrier count stays
// block-uniform
for (IndexType kk = 0; kk < maxNumBond; ++kk){
if (docopy){
bk_Index = bondIndex[my_fromIndex];
bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
bk_Neighbor_localIndex = bondNeighbor_localIndex [my_fromIndex];
}
__syncthreads();
if (docopy){
bondIndex[my_toIndex] = bk_Index;
bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
bondNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// angles: same pattern; 2 neighbor entries per angle
if (maxNumAngle != 0){
IndexType my_numAngle;
if (tid < this_numAtomInCell){
my_numAngle = (numAngle[fromIndex]);
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numAngle * 2; ++i){
IndexType tmp0 = angleNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
angleNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
__syncthreads();
if (maxNumAngle != 0){
if (docopy){
bk_num = my_numAngle;
}
__syncthreads();
if (docopy){
numAngle[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumAngle; ++kk){
if (docopy){
bk_Index = angleIndex[my_fromIndex];
bk_Posi = anglePosi [my_fromIndex];
}
__syncthreads();
if (docopy){
angleIndex[my_toIndex] = bk_Index;
anglePosi [my_toIndex] = bk_Posi ;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
if (docopy){
bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
bk_Neighbor_localIndex = angleNeighbor_localIndex [my_fromIndex];
}
__syncthreads();
if (docopy){
angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
angleNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// dihedrals: same pattern; 3 neighbor entries per dihedral
if (maxNumDihedral != 0){
IndexType my_numDihedral;
if (tid < this_numAtomInCell){
my_numDihedral = (numDihedral[fromIndex]);
IndexType my_index = fromIndex;
for (IndexType i = 0; i < my_numDihedral * 3; ++i){
IndexType tmp0 = dihedralNeighbor_localIndex[my_index];
IndexType tmp1 = forwardMap[tmp0];
if (tmp1 != MaxIndexValue){
dihedralNeighbor_localIndex[my_index] = tmp1;
}
my_index += bondTopStride;
}
}
__syncthreads();
if (maxNumDihedral != 0){
if (docopy){
bk_num = my_numDihedral;
}
__syncthreads();
if (docopy){
numDihedral[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
if (docopy){
bk_Index = dihedralIndex[my_fromIndex];
bk_Posi = dihedralPosi [my_fromIndex];
}
__syncthreads();
if (docopy){
dihedralIndex[my_toIndex] = bk_Index;
dihedralPosi [my_toIndex] = bk_Posi ;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
if (docopy){
bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
bk_Neighbor_localIndex = dihedralNeighbor_localIndex [my_fromIndex];
}
__syncthreads();
if (docopy){
dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
dihedralNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// if (maxNumBond != 0){
//   if (docopy){
//     bk_num = numBond[fromIndex];
//   }
//   __syncthreads();
//   if (docopy){
//     numBond[toIndex] = bk_num;
//   }
//   __syncthreads();
//   IndexType my_fromIndex = fromIndex;
//   IndexType my_toIndex   = toIndex;
//   for (IndexType kk = 0; kk < maxNumBond; ++kk){
//     if (docopy){
// 	bk_Index = bondIndex[my_fromIndex];
// 	bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = bondNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  bk_Neighbor_localIndex = tmpIndex0;
// 	}
// 	else {
// 	  bk_Neighbor_localIndex = tmpIndex1;
// 	}
//     }
//     __syncthreads();
//     if (docopy){
// 	bondIndex[my_toIndex] = bk_Index;
// 	bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// 	bondNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
//     }
//     __syncthreads();
//     my_fromIndex += bondTopStride;
//     my_toIndex   += bondTopStride;
//   }
// }
// if (maxNumAngle != 0){
//   if (docopy){
//     bk_num = numAngle[fromIndex];
//   }
//   __syncthreads();
//   if (docopy){
//     numAngle[toIndex] = bk_num;
//   }
//   __syncthreads();
//   IndexType my_fromIndex = fromIndex;
//   IndexType my_toIndex   = toIndex;
//   for (IndexType kk = 0; kk < maxNumAngle; ++kk){
//     if (docopy){
// 	bk_Index = angleIndex[my_fromIndex];
// 	bk_Posi  = anglePosi [my_fromIndex];
//     }
//     __syncthreads();
//     if (docopy){
// 	angleIndex[my_toIndex] = bk_Index;
// 	anglePosi [my_toIndex] = bk_Posi ;
//     }
//     __syncthreads();
//     my_fromIndex += bondTopStride;
//     my_toIndex   += bondTopStride;
//   }
//   my_fromIndex = fromIndex;
//   my_toIndex   = toIndex;
//   for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
//     if (docopy){
// 	bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = angleNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  bk_Neighbor_localIndex = tmpIndex0;
// 	}
// 	else {
// 	  bk_Neighbor_localIndex = tmpIndex1;
// 	}
//     }
//     __syncthreads();
//     if (docopy){
// 	angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// 	angleNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
//     }
//     __syncthreads();
//     my_fromIndex += bondTopStride;
//     my_toIndex   += bondTopStride;
//   }
// }
// if (maxNumDihedral != 0){
//   if (docopy){
//     bk_num = numDihedral[fromIndex];
//   }
//   __syncthreads();
//   if (docopy){
//     numDihedral[toIndex] = bk_num;
//   }
//   __syncthreads();
//   IndexType my_fromIndex = fromIndex;
//   IndexType my_toIndex   = toIndex;
//   for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
//     if (docopy){
// 	bk_Index = dihedralIndex[my_fromIndex];
// 	bk_Posi  = dihedralPosi [my_fromIndex];
//     }
//     __syncthreads();
//     if (docopy){
// 	dihedralIndex[my_toIndex] = bk_Index;
// 	dihedralPosi [my_toIndex] = bk_Posi ;
//     }
//     __syncthreads();
//     my_fromIndex += bondTopStride;
//     my_toIndex   += bondTopStride;
//   }
//   my_fromIndex = fromIndex;
//   my_toIndex   = toIndex;
//   for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
//     if (docopy){
// 	bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
// 	IndexType tmpIndex0 = dihedralNeighbor_localIndex[my_fromIndex];
// 	IndexType tmpIndex1 = forwardMap[tmpIndex0];
// 	if (tmpIndex1 == MaxIndexValue){
// 	  bk_Neighbor_localIndex = tmpIndex0;
// 	}
// 	else {
// 	  bk_Neighbor_localIndex = tmpIndex1;
// 	}
//     }
//     __syncthreads();
//     if (docopy){
// 	dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// 	dihedralNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
//     }
//     __syncthreads();
//     my_fromIndex += bondTopStride;
//     my_toIndex   += bondTopStride;
//   }
// }
}
// Step 1 of the bonded-topology remap, global-index-only overload: this
// variant carries no cell-local neighbor indices, so there is nothing to
// translate -- it only moves each atom's bond/angle/dihedral rows from its
// old slot to the slot given by forwardMap.
// Launch layout: one block per cell, one thread per atom slot; topology
// arrays are strided by bondTopStride (entry k of atom a sits at
// a + k * bondTopStride).
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
const IndexType * bk_numAtomInCell,
IndexType maxNumBond,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType maxNumAngle,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi,
IndexType * angleNeighbor_globalIndex,
IndexType maxNumDihedral,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi,
IndexType * dihedralNeighbor_globalIndex,
IndexType bondTopStride)
{
  const IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType tid = threadIdx.x;
  // empty cell: the whole block exits together (value is block-uniform)
  if (bk_numAtomInCell[bid] == 0) return;
  const IndexType fromIndex = tid + bid * blockDim.x;
  const IndexType toIndex = forwardMap[fromIndex];
  // only slots that map to a different, valid slot have work to do; this
  // kernel contains no __syncthreads(), so a per-thread early exit is safe
  if (toIndex == MaxIndexValue || toIndex == fromIndex) return;

  if (maxNumBond != 0){
    // move the bond row and clear the old slot's count
    const IndexType nBond = numBond[toIndex] = numBond[fromIndex];
    numBond[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType k = 0; k < nBond; ++k, src += bondTopStride, dst += bondTopStride){
      bondIndex[dst] = bondIndex[src];
      bondNeighbor_globalIndex[dst] = bondNeighbor_globalIndex[src];
    }
  }
  if (maxNumAngle != 0){
    // each angle has one index/posi entry and 2 neighbor entries
    const IndexType nAngle = numAngle[toIndex] = numAngle[fromIndex];
    numAngle[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType k = 0; k < nAngle; ++k, src += bondTopStride, dst += bondTopStride){
      angleIndex[dst] = angleIndex[src];
      anglePosi [dst] = anglePosi [src];
    }
    src = fromIndex;
    dst = toIndex;
    for (IndexType k = 0; k < nAngle * 2; ++k, src += bondTopStride, dst += bondTopStride){
      angleNeighbor_globalIndex[dst] = angleNeighbor_globalIndex[src];
    }
  }
  if (maxNumDihedral != 0){
    // each dihedral has one index/posi entry and 3 neighbor entries
    const IndexType nDihedral = numDihedral[toIndex] = numDihedral[fromIndex];
    numDihedral[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType k = 0; k < nDihedral; ++k, src += bondTopStride, dst += bondTopStride){
      dihedralIndex[dst] = dihedralIndex[src];
      dihedralPosi [dst] = dihedralPosi [src];
    }
    src = fromIndex;
    dst = toIndex;
    for (IndexType k = 0; k < nDihedral * 3; ++k, src += bondTopStride, dst += bondTopStride){
      dihedralNeighbor_globalIndex[dst] = dihedralNeighbor_globalIndex[src];
    }
  }
}
// Step 2 of the bonded-topology remap, global-index-only overload: no
// cell-local neighbor indices to translate, so this kernel only moves rows.
// Moves are double-buffered: load into registers, block-wide barrier,
// store -- a destination row is therefore never overwritten before the
// thread that owns it has read it (within the same block).
// Launch layout: one block per cell, one thread per atom slot; topology
// arrays are strided by bondTopStride.
// NOTE(review): the barriers only order threads of one block; forwardMap
// presumably maps slots within the same cell -- confirm against the kernel
// that builds it.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
const IndexType * bk_numAtomInCell,
IndexType maxNumBond,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType maxNumAngle,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi,
IndexType * angleNeighbor_globalIndex,
IndexType maxNumDihedral,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi,
IndexType * dihedralNeighbor_globalIndex,
IndexType bondTopStride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// uniform per block, so this early return cannot diverge
IndexType this_numAtomInCell = bk_numAtomInCell[bid];
if (this_numAtomInCell == 0) return;
IndexType fromIndex = tid + bid * blockDim.x;
IndexType toIndex = forwardMap[fromIndex];
// threads with docopy == false still execute every barrier below, so the
// __syncthreads() calls stay block-uniform
bool docopy = (toIndex != MaxIndexValue && fromIndex != toIndex);
IndexType bk_num, bk_Index, bk_Posi;
IndexType bk_Neighbor_globalIndex;
if (maxNumBond != 0){
if (docopy){
bk_num = (numBond[fromIndex]);
}
__syncthreads();
if (docopy){
numBond[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
// full-capacity trip count (maxNumBond) keeps the barrier count
// identical across all threads of the block
for (IndexType kk = 0; kk < maxNumBond; ++kk){
if (docopy){
bk_Index = bondIndex[my_fromIndex];
bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
}
__syncthreads();
if (docopy){
bondIndex[my_toIndex] = bk_Index;
bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
// angles: same pattern; 2 neighbor entries per angle
if (maxNumAngle != 0){
if (docopy){
bk_num = (numAngle[fromIndex]);
}
__syncthreads();
if (docopy){
numAngle[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumAngle; ++kk){
if (docopy){
bk_Index = angleIndex[my_fromIndex];
bk_Posi = anglePosi [my_fromIndex];
}
__syncthreads();
if (docopy){
angleIndex[my_toIndex] = bk_Index;
anglePosi [my_toIndex] = bk_Posi ;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
if (docopy){
bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
}
__syncthreads();
if (docopy){
angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
// dihedrals: same pattern; 3 neighbor entries per dihedral
if (maxNumDihedral != 0){
if (docopy){
bk_num = (numDihedral[fromIndex]);
}
__syncthreads();
if (docopy){
numDihedral[toIndex] = bk_num;
}
__syncthreads();
IndexType my_fromIndex = fromIndex;
IndexType my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
if (docopy){
bk_Index = dihedralIndex[my_fromIndex];
bk_Posi = dihedralPosi [my_fromIndex];
}
__syncthreads();
if (docopy){
dihedralIndex[my_toIndex] = bk_Index;
dihedralPosi [my_toIndex] = bk_Posi ;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
my_fromIndex = fromIndex;
my_toIndex = toIndex;
for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
if (docopy){
bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
}
__syncthreads();
if (docopy){
dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
}
__syncthreads();
my_fromIndex += bondTopStride;
my_toIndex += bondTopStride;
}
}
}
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
// IndexType maxNumBond,
// IndexType * numBond,
// IndexType * bondIndex,
// IndexType * bondNeighbor_globalIndex,
// IndexType maxNumAngle,
// IndexType * numAngle,
// IndexType * angleIndex,
// IndexType * anglePosi,
// IndexType * angleNeighbor_globalIndex,
// IndexType maxNumDihedral,
// IndexType * numDihedral,
// IndexType * dihedralIndex,
// IndexType * dihedralPosi,
// IndexType * dihedralNeighbor_globalIndex,
// IndexType bondTopStride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType fromIndex = tid + bid * blockDim.x;
// IndexType toIndex = forwardMap[fromIndex];
// if (toIndex != MaxIndexValue){
// if (maxNumBond != 0){
// IndexType my_numBond = (numBond[toIndex] = numBond[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numBond; ++i){
// bondIndex[my_toIndex] = bondIndex[my_fromIndex];
// bondNeighbor_globalIndex[my_toIndex] =
// bondNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumAngle != 0){
// IndexType my_numAngle = (numAngle[toIndex] = numAngle[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numAngle; ++i){
// angleIndex[my_toIndex] = angleIndex[my_fromIndex];
// anglePosi [my_toIndex] = anglePosi [my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numAngle * 2; ++i){
// angleNeighbor_globalIndex[my_toIndex] =
// angleNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumDihedral != 0){
// IndexType my_numDihedral = (numDihedral[toIndex] = numDihedral[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numDihedral; ++i){
// dihedralIndex[my_toIndex] = dihedralIndex[my_fromIndex];
// dihedralPosi [my_toIndex] = dihedralPosi [my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numDihedral * 3; ++i){
// dihedralNeighbor_globalIndex[my_toIndex] =
// dihedralNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// }
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
// IndexType maxNumBond,
// IndexType * numBond,
// IndexType * bondIndex,
// IndexType * bondNeighbor_globalIndex,
// IndexType maxNumAngle,
// IndexType * numAngle,
// IndexType * angleIndex,
// IndexType * anglePosi,
// IndexType * angleNeighbor_globalIndex,
// IndexType maxNumDihedral,
// IndexType * numDihedral,
// IndexType * dihedralIndex,
// IndexType * dihedralPosi,
// IndexType * dihedralNeighbor_globalIndex,
// IndexType bondTopStride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType fromIndex = tid + bid * blockDim.x;
// IndexType toIndex = forwardMap[fromIndex];
// bool docopy = (toIndex != MaxIndexValue);
// IndexType bk_num, bk_Index, bk_Posi;
// IndexType bk_Neighbor_globalIndex;
// if (maxNumBond != 0){
// if (docopy){
// bk_num = numBond[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numBond[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumBond; ++kk){
// if (docopy){
// bk_Index = bondIndex[my_fromIndex];
// bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// bondIndex[my_toIndex] = bk_Index;
// bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumAngle != 0){
// if (docopy){
// bk_num = numAngle[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numAngle[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumAngle; ++kk){
// if (docopy){
// bk_Index = angleIndex[my_fromIndex];
// bk_Posi = anglePosi [my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// angleIndex[my_toIndex] = bk_Index;
// anglePosi [my_toIndex] = bk_Posi ;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
// if (docopy){
// bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumDihedral != 0){
// if (docopy){
// bk_num = numDihedral[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numDihedral[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
// if (docopy){
// bk_Index = dihedralIndex[my_fromIndex];
// bk_Posi = dihedralPosi [my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// dihedralIndex[my_toIndex] = bk_Index;
// dihedralPosi [my_toIndex] = bk_Posi ;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
// if (docopy){
// bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step1 (const VectorType frameLow,
// const VectorType frameUp,
// const IntVectorType numCell,
// const IndexType * bk_numAtomInCell,
// IndexType * numAtomInCell,
// CoordType * coord,
// CoordNoiType * coordNoi,
// ScalorType * velox,
// ScalorType * veloy,
// ScalorType * veloz,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// IndexType * globalIndex,
// TypeType * type,
// ScalorType * mass,
// ScalorType * charge,
// mdError_t * ptr_de)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
// __shared__ ScalorType dcellxi;
// __shared__ ScalorType dcellyi;
// __shared__ ScalorType dcellzi;
// if (tid == 0) {
// dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
// dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
// dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
// }
// __syncthreads();
// bool inRange = tid < bk_numAtomInCell[bid];
// if (inRange){
// IndexType targetCellx, targetCelly, targetCellz;
// targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
// targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
// targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
// if (ptr_de != NULL &&
// (targetCellx >= numCell.x ||
// targetCelly >= numCell.y ||
// targetCellz >= numCell.z)){
// *ptr_de = mdErrorOverFlowCellIdx;
// return;
// }
// IndexType cellid = CudaDevice::D3toD1
// (numCell, targetCellx, targetCelly, targetCellz);
// if (cellid != bid){
// IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// if (pid >= blockDim.x){
// *ptr_de = mdErrorShortCellList;
// pid = 0;
// }
// IndexType targetIndex = pid + cellid * blockDim.x;
// coord[targetIndex] = coord[ii];
// coordNoi[targetIndex].x = coordNoi[ii].x;
// coordNoi[targetIndex].y = coordNoi[ii].y;
// coordNoi[targetIndex].z = coordNoi[ii].z;
// velox[targetIndex] = velox[ii];
// veloy[targetIndex] = veloy[ii];
// veloz[targetIndex] = veloz[ii];
// forcx[targetIndex] = forcx[ii];
// forcy[targetIndex] = forcy[ii];
// forcz[targetIndex] = forcz[ii];
// globalIndex[targetIndex] = globalIndex[ii];
// globalIndex[ii] = MaxIndexValue;
// type[targetIndex] = type[ii];
// mass[targetIndex] = mass[ii];
// charge[targetIndex] = charge[ii];
// IndexType my_numBond = (numBond[targetIndex] = numBond[ii]);
// for (IndexType kk = 0; kk < my_numBond; ++kk){
// IndexType fromListIndex = Parallel::DeviceBondList_cudaDevice::indexConvert
// (bondTopStride, ii, kk);
// IndexType toListIndex = Parallel::DeviceBondList_cudaDevice::indexConvert
// (bondTopStride, targetIndex, kk);
// bondNeighbor_globalIndex[toListIndex] = bondNeighbor_globalIndex[fromListIndex];
// bondIndex[toListIndex] = bondIndex[fromListIndex];
// bondNeighbor_localIndex[toListIndex] = bondNeighbor_localIndex[fromListIndex];
// }
// }
// }
// return;
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step2 (IndexType * numAtomInCell,
// CoordType * coord,
// CoordNoiType * coordNoi,
// ScalorType * velox,
// ScalorType * veloy,
// ScalorType * veloz,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// IndexType * globalIndex,
// TypeType * type,
// ScalorType * mass,
// ScalorType * charge,
// IndexType * numBond,
// IndexType * bondNeighbor_globalIndex,
// IndexType * bondIndex,
// IndexType * bondNeighbor_localIndex,
// IndexType bondTopStride,
// IndexType maxNumBond,
// mdError_t * ptr_de)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
// // IndexType k = NUintBit - 1;
// extern __shared__ volatile IndexType sbuff[];
// volatile IndexType * myIndex = (volatile IndexType * )sbuff;
// if (tid >= numAtomInCell[bid] || globalIndex[ii] == MaxIndexValue){
// myIndex[tid] = MaxIndexValue;
// }
// else {
// myIndex[tid] = tid;
// }
// __syncthreads();
// IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
// total = blockDim.x - total;
// IndexType fromId;
// CoordType bk_coord;
// CoordNoiType bk_coordNoi;
// ScalorType bk_velox, bk_veloy, bk_veloz;
// ScalorType bk_forcx, bk_forcy, bk_forcz;
// IndexType bk_globalIndex;
// TypeType bk_type;
// ScalorType bk_mass;
// ScalorType bk_charge;
// IndexType bk_numBond;
// if (tid < total){
// fromId = myIndex[tid] + bid * blockDim.x;
// if (ii != fromId){
// bk_coord = coord[fromId];
// bk_coordNoi.x = coordNoi[fromId].x;
// bk_coordNoi.y = coordNoi[fromId].y;
// bk_coordNoi.z = coordNoi[fromId].z;
// bk_velox = velox[fromId];
// bk_veloy = veloy[fromId];
// bk_veloz = veloz[fromId];
// bk_forcx = forcx[fromId];
// bk_forcy = forcy[fromId];
// bk_forcz = forcz[fromId];
// bk_globalIndex = globalIndex[fromId];
// bk_type = type[fromId];
// bk_mass = mass[fromId];
// bk_charge = charge[fromId];
// bk_numBond = numBond[fromId];
// }
// }
// __syncthreads();
// bool docopy = (tid < total && ii != fromId);
// if (docopy){
// coord[ii] = bk_coord;
// coordNoi[ii].x = bk_coordNoi.x;
// coordNoi[ii].y = bk_coordNoi.y;
// coordNoi[ii].z = bk_coordNoi.z;
// velox[ii] = bk_velox;
// veloy[ii] = bk_veloy;
// veloz[ii] = bk_veloz;
// forcx[ii] = bk_forcx;
// forcy[ii] = bk_forcy;
// forcz[ii] = bk_forcz;
// globalIndex[ii] = bk_globalIndex;
// type[ii] = bk_type;
// mass[ii] = bk_mass;
// charge[ii] = bk_charge;
// numBond[ii] = bk_numBond;
// }
// IndexType bk_bondNeighbor_globalIndex;
// IndexType bk_bondIndex;
// IndexType bk_bondNeighbor_localIndex;
// for (IndexType kk = 0; kk < maxNumBond; ++kk){
// __syncthreads();
// if (docopy){
// bk_bondNeighbor_globalIndex = bondNeighbor_globalIndex[fromId];
// bk_bondIndex = bondIndex[fromId];
// bk_bondNeighbor_localIndex = bondNeighbor_localIndex[fromId];
// }
// __syncthreads();
// if (docopy){
// bondNeighbor_globalIndex [ii] = bk_bondNeighbor_globalIndex;
// bondIndex[ii] = bk_bondIndex;
// bondNeighbor_localIndex[ii] = bk_bondNeighbor_localIndex;
// }
// }
// if (tid == 0){
// numAtomInCell[bid] = total;
// }
// }
void Parallel::DeviceCellListedMDData::
easyMallocCell (const IndexType & totalNumCell)
{
  // Allocate the device-side cell bookkeeping: one atom counter per cell
  // plus two forward maps sized totalNumCell * threads-per-cell.
  // Any previous allocation is released first.
  if (totalNumCell == 0) return;
  clearCell ();
  const size_t countBytes = sizeof(IndexType) * totalNumCell;
  const size_t mapBytes =
      sizeof(IndexType) * totalNumCell * Parallel::Interface::numThreadsInCell();
  hipMalloc ((void**)&numAtomInCell, countBytes);
  hipMalloc ((void**)&forwardMap_step1, mapBytes);
  hipMalloc ((void**)&forwardMap_step2, mapBytes);
  memSize = totalNumCell;
  checkCUDAError ("malloc Cell");
  malloced = true;
}
void Parallel::DeviceCellListedMDData::
clearCell()
{
  // Release the device-side cell bookkeeping; safe to call repeatedly
  // (it is a no-op unless easyMallocCell succeeded before).
  if (!malloced) return;
  hipFree (numAtomInCell);
  hipFree (forwardMap_step2);
  hipFree (forwardMap_step1);
  memSize = 0;
  malloced = false;
}
Parallel::DeviceCellListedMDData::
DeviceCellListedMDData ()
{
  // Start as an empty, unallocated cell list; easyMallocCell() must be
  // called before any cell data can be stored.
  rlist = 0;
  devideLevel = 0;
  memSize = 0;
  malloced = false;
  frameLow.x = frameLow.y = frameLow.z = 0;
  frameUp.x  = frameUp.y  = frameUp.z  = 0;
  numCell.x  = numCell.y  = numCell.z  = 0;
}
// Destructor: release device cell storage (no-op if nothing allocated).
Parallel::DeviceCellListedMDData::
~DeviceCellListedMDData()
{
clearCell();
}
// Default constructor: a SubCellList starts as an empty index vector.
Parallel::SubCellList::
SubCellList ()
{
}
void Parallel::SubCellList::
build ()
{
  // Put the stored cell indexes into the sorted order that isBuilt()
  // checks for.
  Parallel::Interface::sort (begin(), end());
}
bool Parallel::SubCellList::
isBuilt ()
{
  // The list counts as built when its indexes are in ascending order.
  return Parallel::Interface::is_sorted (begin(), end());
}
void Parallel::SubCellList::
add (const SubCellList & a)
{
  // Merge the cell indexes of `a` into this list, keeping it a sorted
  // set (the invariant build()/isBuilt() rely on).
  for (std::vector<IndexType>::const_iterator it = a.begin();
       it != a.end(); ++it){
    push_back (*it);
  }
  // BUGFIX: the old code ran unique() *before* sort() and discarded its
  // return value, so duplicates were never actually removed.  Sort first,
  // then drop adjacent duplicates and erase the left-over tail.
  // NOTE(review): assumes Parallel::Interface::unique mirrors std::unique
  // and returns the new logical end, as Interface::set_difference /
  // Interface::copy do in sub() below — confirm against the Interface header.
  Parallel::Interface::sort (this->begin(), this->end());
  std::vector<IndexType>::iterator newEnd =
      Parallel::Interface::unique (this->begin(), this->end());
  this->erase (newEnd, this->end());
}
void Parallel::SubCellList::
sub (const SubCellList & a)
{
  // Remove from this list every cell index that also appears in `a`
  // (set difference in place).
  std::vector<IndexType> diff (this->size());
  std::vector<IndexType>::iterator diffEnd =
      Parallel::Interface::set_difference (this->begin(), this->end(),
                                           a.begin(), a.end(),
                                           diff.begin());
  std::vector<IndexType>::iterator keptEnd =
      Parallel::Interface::copy (diff.begin(), diffEnd, this->begin());
  this->erase (keptEnd, this->end());
}
void Parallel::DeviceCellListedMDData::
buildSubList (const IndexType & xIdLo,
              const IndexType & xIdUp,
              const IndexType & yIdLo,
              const IndexType & yIdUp,
              const IndexType & zIdLo,
              const IndexType & zIdUp,
              SubCellList & subList) const
{
  // Fill subList with the 1D indexes of every cell in the half-open box
  // [xIdLo,xIdUp) x [yIdLo,yIdUp) x [zIdLo,zIdUp).
  // Throws MDExcptCellList when an upper bound exceeds the grid.
  if (xIdUp > numCell.x){
    throw MDExcptCellList ("x up index exceeds number of cells on x");
  }
  if (yIdUp > numCell.y){
    throw MDExcptCellList ("y up index exceeds number of cells on y");
  }
  if (zIdUp > numCell.z){
    throw MDExcptCellList ("z up index exceeds number of cells on z");
  }
  subList.clear();
  for (IndexType ix = xIdLo; ix < xIdUp; ++ix){
    for (IndexType iy = yIdLo; iy < yIdUp; ++iy){
      for (IndexType iz = zIdLo; iz < zIdUp; ++iz){
        subList.push_back (D3toD1 (ix, iy, iz));
      }
    }
  }
}
Parallel::DeviceTransferPackage::
DeviceTransferPackage ()
    : myMask (MDDataItemMask_All)
{
  // Empty package: no cells selected yet and no host/device index
  // buffers allocated (easyMallocMe() creates them on demand).
  numCell = 0;
  memSize = 0;
  hcellIndex = NULL;
  hcellStartIndex = NULL;
}
void Parallel::DeviceTransferPackage::
clearMe ()
{
  // Release both the device-side and the host-side cell index buffers;
  // a no-op when nothing has been allocated (memSize == 0).
  if (memSize == 0) return;
  hipFree (cellIndex);
  hipFree (cellStartIndex);
  freeAPointer ((void**)&hcellIndex);
  freeAPointer ((void**)&hcellStartIndex);
  memSize = 0;
  numCell = 0;
}
// Destructor: free all host/device buffers owned by the package.
Parallel::DeviceTransferPackage::
~DeviceTransferPackage ()
{
clearMe();
}
void Parallel::DeviceTransferPackage::
easyMallocMe (IndexType memSize_)
{
  // Allocate mirrored host/device arrays for up to memSize_ cells.
  // The start-index arrays carry one extra trailing entry (prefix-sum end).
  if (memSize_ == 0) return;
  clearMe ();
  memSize = memSize_;
  const size_t size  = memSize * sizeof(IndexType);
  const size_t size1 = (memSize + 1) * sizeof(IndexType);
  hipMalloc ((void**)&cellIndex, size);
  hipMalloc ((void**)&cellStartIndex, size1);
  checkCUDAError ("DeviceTransferPackage::mallocMe failed malloc");
  hcellIndex = (IndexType *) malloc (size);
  if (hcellIndex == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
                                     "hcellIndex", size);
  }
  hcellStartIndex = (IndexType *) malloc (size1);
  if (hcellStartIndex == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
                                     "hcellStartIndex", size1);
  }
}
void Parallel::DeviceTransferPackage::
reinit (const SubCellList & subCellList)
{
  // Select the cells this package will transfer: record their indexes on
  // the host and mirror them to the device, growing storage if needed.
  if (memSize < subCellList.size()){
    easyMallocMe (subCellList.size()*MemAllocExtension);
  }
  numCell = subCellList.size();
  for (IndexType i = 0; i < numCell; ++i){
    hcellIndex[i] = subCellList[i];
  }
  // BUGFIX: copy only the numCell entries initialized above.  The old
  // code copied memSize entries, reading uninitialized host memory past
  // hcellIndex[numCell-1] whenever memSize > numCell.
  size_t size = numCell * sizeof(IndexType);
  hipMemcpy (cellIndex, hcellIndex, size, hipMemcpyHostToDevice);
  checkCUDAError ("DeviceTransferPackage::reinit memcpy");
}
// Gather the atoms of the selected cells of `ddata` into this package's
// dense buffers.  Builds the per-cell start offsets (prefix sum of the
// selected cells' atom counts), reallocates the package if needed, then
// launches one packing kernel for plain per-atom data and one for the
// bonded topology.  Only items selected by `mask` are copied.
void Parallel::DeviceTransferPackage::
pack (const DeviceCellListedMDData & ddata,
const MDDataItemMask_t mask)
{
if (numCell == 0) return;
myMask = mask;
IndexType totalNumCell = ddata.numCell.x * ddata.numCell.y * ddata.numCell.z;
IndexType * numAtomInCell ;
size_t size = totalNumCell * sizeof(IndexType);
// fetch the per-cell atom counters to the host to build the prefix sum
numAtomInCell = (IndexType *) malloc (size);
if (numAtomInCell == NULL){
throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
"numAtomInCell", size);
}
hipMemcpy (numAtomInCell, ddata.numAtomInCell, size,
hipMemcpyDeviceToHost);
checkCUDAError ("DeviceTransferPackage::pack cpy numAtomInCell to host");
// hcellStartIndex[i] = number of atoms in the first i selected cells;
// hcellStartIndex[numCell] is therefore the packed total.
hcellStartIndex[0] = 0;
for (IndexType i = 1; i < numCell+1; ++i){
hcellStartIndex[i] = hcellStartIndex[i-1] + numAtomInCell[hcellIndex[i-1]];
}
IndexType & expectedNumData (hcellStartIndex[numCell]);
hipMemcpy (cellStartIndex, hcellStartIndex, (numCell+1) * sizeof(IndexType),
hipMemcpyHostToDevice);
checkCUDAError ("DeviceTransferPackage::pack cpy cellStartIndex to device");
free (numAtomInCell);
IndexType expectedNumBond(0), expectedNumAngle(0), expectedNumDihedral(0);
bool copyBond = (mask & MDDataItemMask_Bond);
bool copyAngle = (mask & MDDataItemMask_Angle);
bool copyDihedral = (mask & MDDataItemMask_Dihedral);
if (copyBond) expectedNumBond = ddata.getMaxNumBond();
if (copyAngle) expectedNumAngle = ddata.getMaxNumAngle();
if (copyDihedral) expectedNumDihedral = ddata.getMaxNumDihedral();
DeviceMDData::setGlobalBox (ddata.getGlobalBox());
// grow the package when the packed atom count or any topology stride
// no longer matches the source data
if (expectedNumData > DeviceMDData::memSize() ){
// printf ("# DeviceTransferPackage::pack, realloc\n");
DeviceMDData::easyMalloc(
expectedNumData * MemAllocExtension,
expectedNumBond, expectedNumAngle, expectedNumDihedral);
}
else if ((copyBond && (getMaxNumBond() != ddata.getMaxNumBond())) ||
(copyAngle && (getMaxNumAngle() != ddata.getMaxNumAngle())) ||
(copyDihedral && (getMaxNumDihedral() != ddata.getMaxNumDihedral())) ){
// printf ("# DeviceTransferPackage::pack, realloc\n");
DeviceMDData::easyMalloc(
DeviceMDData::memSize(),
expectedNumBond, expectedNumAngle, expectedNumDihedral);
}
DeviceMDData::numData() = expectedNumData;
checkCUDAError ("DeviceTransferPackage::pack, packDeviceMDData, before");
// one block per selected cell, one thread per atom slot
hipLaunchKernelGGL(( Parallel::CudaGlobal::packDeviceMDData)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
ddata.dptr_numAtomInCell(),
cellStartIndex,
mask,
ddata.dptr_coordinate(),
ddata.dptr_coordinateNoi(),
ddata.dptr_velocityX(),
ddata.dptr_velocityY(),
ddata.dptr_velocityZ(),
ddata.dptr_forceX(),
ddata.dptr_forceY(),
ddata.dptr_forceZ(),
ddata.dptr_globalIndex(),
ddata.dptr_type(),
ddata.dptr_mass(),
ddata.dptr_charge(),
this->dptr_coordinate(),
this->dptr_coordinateNoi(),
this->dptr_velocityX(),
this->dptr_velocityY(),
this->dptr_velocityZ(),
this->dptr_forceX(),
this->dptr_forceY(),
this->dptr_forceZ(),
this->dptr_globalIndex(),
this->dptr_type(),
this->dptr_mass(),
this->dptr_charge());
hipLaunchKernelGGL(( Parallel::CudaGlobal::packDeviceMDData_bondTop)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
ddata.dptr_numAtomInCell(),
cellStartIndex,
mask,
ddata.dptr_numBond(),
ddata.dptr_bondIndex(),
ddata.dptr_bondNeighbor_globalIndex(),
ddata.dptr_numAngle(),
ddata.dptr_angleIndex(),
ddata.dptr_anglePosi(),
ddata.dptr_angleNeighbor_globalIndex(),
ddata.dptr_numDihedral(),
ddata.dptr_dihedralIndex(),
ddata.dptr_dihedralPosi(),
ddata.dptr_dihedralNeighbor_globalIndex(),
ddata.bondTopStride(),
ddata.getMaxNumBond(),
ddata.getMaxNumAngle(),
ddata.getMaxNumDihedral(),
this->dptr_numBond(),
this->dptr_bondIndex(),
this->dptr_bondNeighbor_globalIndex(),
this->dptr_numAngle(),
this->dptr_angleIndex(),
this->dptr_anglePosi(),
this->dptr_angleNeighbor_globalIndex(),
this->dptr_numDihedral(),
this->dptr_dihedralIndex(),
this->dptr_dihedralPosi(),
this->dptr_dihedralNeighbor_globalIndex(),
this->bondTopStride()
);
checkCUDAError ("DeviceTransferPackage::pack, packDeviceMDData");
}
// Pack per-atom data of selected cells into a dense transfer buffer.
// Launched by DeviceTransferPackage::pack() with one block per selected
// cell and Parallel::Interface::numThreadsInCell() threads per block;
// thread tid handles atom slot tid of cell cellIndex[bid].  Only items
// selected by `mask` are copied.
__global__ void Parallel::CudaGlobal::
packDeviceMDData (const IndexType * cellIndex,
                  const IndexType * numAtomInCell,
                  const IndexType * cellStartIndex,
                  const MDDataItemMask_t mask,
                  const CoordType * source_coord,
                  const CoordNoiType * source_coordNoi,
                  const ScalorType * source_velox,
                  const ScalorType * source_veloy,
                  const ScalorType * source_veloz,
                  const ScalorType * source_forcx,
                  const ScalorType * source_forcy,
                  const ScalorType * source_forcz,
                  const IndexType * source_globalIndex,
                  const TypeType * source_type,
                  const ScalorType * source_mass,
                  const ScalorType * source_charge,
                  CoordType * coord,
                  CoordNoiType * coordNoi,
                  ScalorType * velox,
                  ScalorType * veloy,
                  ScalorType * veloz,
                  ScalorType * forcx,
                  ScalorType * forcy,
                  ScalorType * forcz,
                  IndexType * globalIndex,
                  TypeType * type,
                  ScalorType * mass,
                  ScalorType * charge)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType cellIdx = cellIndex[bid];
  // source slot inside the cell list / destination slot in the packed buffer
  IndexType fromid = tid + cellIdx * blockDim.x;
  IndexType toid = tid + cellStartIndex[bid];
  if (tid < numAtomInCell[cellIdx]){
    if (mask & MDDataItemMask_Coordinate){
      coord[toid] = source_coord[fromid];
    }
    if (mask & MDDataItemMask_CoordinateNoi){
      // BUGFIX: copy all three components.  Previously only .x was
      // packed, while unpackDeviceMDData_replace restores x, y and z,
      // so the y/z image counters were lost on transfer.
      coordNoi[toid].x = source_coordNoi[fromid].x;
      coordNoi[toid].y = source_coordNoi[fromid].y;
      coordNoi[toid].z = source_coordNoi[fromid].z;
    }
    if (mask & MDDataItemMask_Velocity){
      velox[toid] = source_velox[fromid];
      veloy[toid] = source_veloy[fromid];
      veloz[toid] = source_veloz[fromid];
    }
    if (mask & MDDataItemMask_Force){
      forcx[toid] = source_forcx[fromid];
      forcy[toid] = source_forcy[fromid];
      forcz[toid] = source_forcz[fromid];
    }
    if (mask & MDDataItemMask_GlobalIndex){
      globalIndex[toid] = source_globalIndex[fromid];
    }
    if (mask & MDDataItemMask_Type){
      type[toid] = source_type[fromid];
    }
    if (mask & MDDataItemMask_Mass){
      mass[toid] = source_mass[fromid];
    }
    if (mask & MDDataItemMask_Charge){
      charge[toid] = source_charge[fromid];
    }
  }
}
// Pack bonded-topology arrays (bonds / angles / dihedrals) of selected
// cells into the transfer buffer.  Same launch layout as
// packDeviceMDData: one block per selected cell, thread tid = atom slot
// tid of cell cellIndex[bid].  Topology arrays are strided: entry k of
// an atom lives at atomSlot + k*stride, so the loops walk by
// source_bondTopStride on the source side and bondTopStride on the
// destination side.  Angle neighbor lists store 2 entries per angle and
// dihedral neighbor lists 3 per dihedral (hence the *2 / *3 loops).
__global__ void Parallel::CudaGlobal::
packDeviceMDData_bondTop (const IndexType * cellIndex,
const IndexType * numAtomInCell,
const IndexType * cellStartIndex,
const MDDataItemMask_t mask,
const IndexType * source_numBond,
const IndexType * source_bondIndex,
const IndexType * source_bondNeighbor_globalIndex,
const IndexType * source_numAngle,
const IndexType * source_angleIndex,
const IndexType * source_anglePosi ,
const IndexType * source_angleNeighbor_globalIndex,
const IndexType * source_numDihedral,
const IndexType * source_dihedralIndex,
const IndexType * source_dihedralPosi ,
const IndexType * source_dihedralNeighbor_globalIndex,
const IndexType source_bondTopStride,
const IndexType maxNumBond,
const IndexType maxNumAngle,
const IndexType maxNumDihedral,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi ,
IndexType * angleNeighbor_globalIndex,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi ,
IndexType * dihedralNeighbor_globalIndex,
const IndexType bondTopStride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType cellIdx = cellIndex[bid];
// source slot inside the cell list / destination slot in the packed buffer
IndexType fromid = tid + cellIdx * blockDim.x;
IndexType toid = tid + cellStartIndex[bid];
if (tid < numAtomInCell[cellIdx]){
if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numBond[toid] = source_numBond[fromid];
for (IndexType k = 0; k < maxNumBond; ++k){
bondIndex[my_toid] = source_bondIndex[my_fromid];
bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numAngle[toid] = source_numAngle[fromid];
for (IndexType k = 0; k < maxNumAngle; ++k){
angleIndex[my_toid] = source_angleIndex[my_fromid];
anglePosi [my_toid] = source_anglePosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// the angle neighbor list holds 2 neighbors per angle
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumAngle * 2; ++k){
angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numDihedral[toid] = source_numDihedral[fromid];
for (IndexType k = 0; k < maxNumDihedral; ++k){
dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// the dihedral neighbor list holds 3 neighbors per dihedral
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
}
}
void Parallel::DeviceTransferPackage::
copyToHost (HostTransferPackage & hpkg) const
{
  // Mirror this package on the host: first the particle data (restricted
  // to myMask), then the per-cell index bookkeeping.
  const DeviceMDData & ddata (*this);
  HostMDData & hdata (hpkg);
  ddata.copyToHost (hdata, myMask);
  if (hpkg.getMemSize() < numCell){
    hpkg.easyMallocMe (numCell * MemAllocExtension);
  }
  hpkg.getTotalNumCell() = numCell;
  hpkg.getMask() = myMask;
  for (IndexType i = 0; i < numCell; ++i){
    hpkg.getCellIndex()[i] = hcellIndex[i];
    hpkg.getCellStartIndex()[i] = hcellStartIndex[i];
  }
  // the start-index array carries one extra trailing entry
  hpkg.getCellStartIndex()[numCell] = hcellStartIndex[numCell];
}
void Parallel::DeviceTransferPackage::
copyFromHost (const HostTransferPackage & hpkg)
{
  // Rebuild this package from its host mirror: particle data first, then
  // the cell index tables, which are finally pushed to the device.
  const HostMDData & hdata (hpkg);
  DeviceMDData & ddata (*this);
  myMask = hpkg.getMask();
  ddata.copyFromHost (hdata, myMask);
  if (memSize < hpkg.getTotalNumCell()){
    easyMallocMe (hpkg.getTotalNumCell() * MemAllocExtension);
  }
  numCell = hpkg.getTotalNumCell();
  for (IndexType i = 0; i < numCell; ++i){
    hcellIndex[i] = hpkg.getCellIndex()[i];
  }
  // start-index table has numCell+1 entries (trailing prefix-sum end)
  for (IndexType i = 0; i < numCell + 1; ++i){
    hcellStartIndex[i] = hpkg.getCellStartIndex()[i];
  }
  hipMemcpy (cellIndex, hcellIndex, sizeof(IndexType)*numCell,
             hipMemcpyHostToDevice);
  hipMemcpy (cellStartIndex, hcellStartIndex, sizeof(IndexType)*(numCell+1),
             hipMemcpyHostToDevice);
  checkCUDAError ("DeviceTransferPackage::copyFromHost cpy from host");
}
// Scatter this package's dense buffers back into the cell list of
// `ddata`, *overwriting* the target cells' contents (their atom counters
// are reset to the packed counts).  One block per packaged cell.
void Parallel::DeviceTransferPackage::
unpack_replace (DeviceCellListedMDData & ddata) const
{
// per-atom data
hipLaunchKernelGGL(( Parallel::CudaGlobal::unpackDeviceMDData_replace)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
cellStartIndex,
myMask,
this->dptr_coordinate(),
this->dptr_coordinateNoi(),
this->dptr_velocityX(),
this->dptr_velocityY(),
this->dptr_velocityZ(),
this->dptr_forceX(),
this->dptr_forceY(),
this->dptr_forceZ(),
this->dptr_globalIndex(),
this->dptr_type(),
this->dptr_mass(),
this->dptr_charge(),
ddata.numAtomInCell,
ddata.dptr_coordinate(),
ddata.dptr_coordinateNoi(),
ddata.dptr_velocityX(),
ddata.dptr_velocityY(),
ddata.dptr_velocityZ(),
ddata.dptr_forceX(),
ddata.dptr_forceY(),
ddata.dptr_forceZ(),
ddata.dptr_globalIndex(),
ddata.dptr_type(),
ddata.dptr_mass(),
ddata.dptr_charge());
// bonded topology data
hipLaunchKernelGGL(( Parallel::CudaGlobal::unpackDeviceMDData_bondTop_replace)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
cellStartIndex,
myMask,
this->dptr_numBond(),
this->dptr_bondIndex(),
this->dptr_bondNeighbor_globalIndex(),
this->dptr_numAngle(),
this->dptr_angleIndex(),
this->dptr_anglePosi(),
this->dptr_angleNeighbor_globalIndex(),
this->dptr_numDihedral(),
this->dptr_dihedralIndex(),
this->dptr_dihedralPosi(),
this->dptr_dihedralNeighbor_globalIndex(),
this->bondTopStride(),
this->getMaxNumBond(),
this->getMaxNumAngle(),
this->getMaxNumDihedral(),
ddata.dptr_numBond(),
ddata.dptr_bondIndex(),
ddata.dptr_bondNeighbor_globalIndex(),
ddata.dptr_numAngle(),
ddata.dptr_angleIndex(),
ddata.dptr_anglePosi(),
ddata.dptr_angleNeighbor_globalIndex(),
ddata.dptr_numDihedral(),
ddata.dptr_dihedralIndex(),
ddata.dptr_dihedralPosi(),
ddata.dptr_dihedralNeighbor_globalIndex(),
ddata.bondTopStride());
checkCUDAError ("DeviceTransferPackage::unpack_replace");
}
// Scatter this package's buffers into `ddata`, *appending* to the target
// cells instead of replacing them.  The first kernel increments the
// per-cell atom counters, so a snapshot of the old counters is taken
// beforehand and handed to the topology kernel, which needs the original
// offsets to place the appended atoms.  Reports mdErrorShortCellList
// through `err` when a cell overflows.
void Parallel::DeviceTransferPackage::
unpack_add (DeviceCellListedMDData & ddata) const
{
IndexType totalNumInCell = ddata.getNumCell().x * ddata.getNumCell().y * ddata.getNumCell().z;
IndexType * oldNumAtomInCell;
size_t size = totalNumInCell * sizeof(IndexType);
// snapshot the counters before the first kernel mutates them
hipMalloc ((void**)&oldNumAtomInCell, size);
checkCUDAError ("DeviceTransferPackage::unpack_add, malloc oldNumAtomInCell");
hipMemcpy (oldNumAtomInCell, ddata.dptr_numAtomInCell(), size,
hipMemcpyDeviceToDevice);
checkCUDAError ("DeviceTransferPackage::unpack_add, copy oldNumAtomInCell");
hipLaunchKernelGGL(( Parallel::CudaGlobal::unpackDeviceMDData_add)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
cellStartIndex,
myMask,
this->dptr_coordinate(),
this->dptr_coordinateNoi(),
this->dptr_velocityX(),
this->dptr_velocityY(),
this->dptr_velocityZ(),
this->dptr_forceX(),
this->dptr_forceY(),
this->dptr_forceZ(),
this->dptr_globalIndex(),
this->dptr_type(),
this->dptr_mass(),
this->dptr_charge(),
ddata.dptr_numAtomInCell(),
ddata.dptr_coordinate(),
ddata.dptr_coordinateNoi(),
ddata.dptr_velocityX(),
ddata.dptr_velocityY(),
ddata.dptr_velocityZ(),
ddata.dptr_forceX(),
ddata.dptr_forceY(),
ddata.dptr_forceZ(),
ddata.dptr_globalIndex(),
ddata.dptr_type(),
ddata.dptr_mass(),
ddata.dptr_charge(),
err.ptr_de);
hipLaunchKernelGGL(( Parallel::CudaGlobal::unpackDeviceMDData_bondTop_add)
, dim3(numCell), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
cellIndex,
cellStartIndex,
oldNumAtomInCell,
myMask,
this->dptr_numBond(),
this->dptr_bondIndex(),
this->dptr_bondNeighbor_globalIndex(),
this->dptr_numAngle(),
this->dptr_angleIndex(),
this->dptr_anglePosi(),
this->dptr_angleNeighbor_globalIndex(),
this->dptr_numDihedral(),
this->dptr_dihedralIndex(),
this->dptr_dihedralPosi(),
this->dptr_dihedralNeighbor_globalIndex(),
this->bondTopStride(),
this->getMaxNumBond(),
this->getMaxNumAngle(),
this->getMaxNumDihedral(),
ddata.dptr_numBond(),
ddata.dptr_bondIndex(),
ddata.dptr_bondNeighbor_globalIndex(),
ddata.dptr_numAngle(),
ddata.dptr_angleIndex(),
ddata.dptr_anglePosi(),
ddata.dptr_angleNeighbor_globalIndex(),
ddata.dptr_numDihedral(),
ddata.dptr_dihedralIndex(),
ddata.dptr_dihedralPosi(),
ddata.dptr_dihedralNeighbor_globalIndex(),
ddata.bondTopStride(),
err.ptr_de);
checkCUDAError ("DeviceTransferPackage::unpack_add");
err.updateHost ();
hipFree (oldNumAtomInCell);
err.check ("DeviceTransferPackage::unpack_add");
}
// Scatter packed per-atom data back into the cell list, overwriting the
// target cells.  One block per packaged cell; thread tid handles packed
// atom tid of the block's cell.  The cell's atom counter is reset to the
// packed count (done by a single thread per block).
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_replace (const IndexType * cellIndex,
const IndexType * cellStartIndex,
const MDDataItemMask_t mask,
const CoordType * source_coord,
const CoordNoiType * source_coordNoi,
const ScalorType * source_velox,
const ScalorType * source_veloy,
const ScalorType * source_veloz,
const ScalorType * source_forcx,
const ScalorType * source_forcy,
const ScalorType * source_forcz,
const IndexType * source_globalIndex,
const TypeType * source_type,
const ScalorType * source_mass,
const ScalorType * source_charge,
IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType cellIdx = cellIndex[bid];
IndexType startIdx = cellStartIndex[bid];
// number of packed atoms destined for this cell
IndexType numAtomInThisCell = cellStartIndex[bid+1] - startIdx;
IndexType toid = tid + cellIdx * blockDim.x;
IndexType fromid = tid + startIdx;
// one thread per block records the new cell population
if (tid == blockDim.x-1){
numAtomInCell[cellIdx] = numAtomInThisCell;
}
if (tid < numAtomInThisCell){
if (mask & MDDataItemMask_Coordinate){
coord[toid] = source_coord[fromid];
}
if (mask & MDDataItemMask_CoordinateNoi){
coordNoi[toid].x = source_coordNoi[fromid].x;
coordNoi[toid].y = source_coordNoi[fromid].y;
coordNoi[toid].z = source_coordNoi[fromid].z;
}
if (mask & MDDataItemMask_Velocity){
velox[toid] = source_velox[fromid];
veloy[toid] = source_veloy[fromid];
veloz[toid] = source_veloz[fromid];
}
if (mask & MDDataItemMask_Force){
forcx[toid] = source_forcx[fromid];
forcy[toid] = source_forcy[fromid];
forcz[toid] = source_forcz[fromid];
}
if (mask & MDDataItemMask_GlobalIndex){
globalIndex[toid] = source_globalIndex[fromid];
}
if (mask & MDDataItemMask_Type){
type[toid] = source_type[fromid];
}
if (mask & MDDataItemMask_Mass){
mass[toid] = source_mass[fromid];
}
if (mask & MDDataItemMask_Charge){
charge[toid] = source_charge[fromid];
}
}
}
// Scatter packed bonded-topology data back into the cell list,
// overwriting the target cells.  Mirror image of
// packDeviceMDData_bondTop: strided walks with source_bondTopStride /
// bondTopStride, 2 neighbor entries per angle, 3 per dihedral.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_bondTop_replace (const IndexType * cellIndex,
const IndexType * cellStartIndex,
const MDDataItemMask_t mask,
const IndexType * source_numBond,
const IndexType * source_bondIndex,
const IndexType * source_bondNeighbor_globalIndex,
const IndexType * source_numAngle,
const IndexType * source_angleIndex,
const IndexType * source_anglePosi ,
const IndexType * source_angleNeighbor_globalIndex,
const IndexType * source_numDihedral,
const IndexType * source_dihedralIndex,
const IndexType * source_dihedralPosi ,
const IndexType * source_dihedralNeighbor_globalIndex,
const IndexType source_bondTopStride,
const IndexType maxNumBond,
const IndexType maxNumAngle,
const IndexType maxNumDihedral,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi ,
IndexType * angleNeighbor_globalIndex,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi ,
IndexType * dihedralNeighbor_globalIndex,
const IndexType bondTopStride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType cellIdx = cellIndex[bid];
IndexType startIdx = cellStartIndex[bid];
// number of packed atoms destined for this cell
IndexType numAtomInThisCell = cellStartIndex[bid+1] - startIdx;
IndexType toid = tid + cellIdx * blockDim.x;
IndexType fromid = tid + startIdx;
if (tid < numAtomInThisCell){
if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numBond[toid] = source_numBond[fromid];
for (IndexType k = 0; k < maxNumBond; ++k){
bondIndex[my_toid] = source_bondIndex[my_fromid];
bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numAngle[toid] = source_numAngle[fromid];
for (IndexType k = 0; k < maxNumAngle; ++k){
angleIndex[my_toid] = source_angleIndex[my_fromid];
anglePosi [my_toid] = source_anglePosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// 2 neighbor entries per angle
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumAngle * 2; ++k){
angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numDihedral[toid] = source_numDihedral[fromid];
for (IndexType k = 0; k < maxNumDihedral; ++k){
dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// 3 neighbor entries per dihedral
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
}
}
// Scatter packed per-atom data into the cell list, *appending* after the
// atoms already present.  One block per packaged cell.  Thread 0 bumps
// the cell's atom counter by the number of appended atoms and raises
// mdErrorShortCellList via ptr_de when the cell would overflow its
// blockDim.x capacity, in which case the whole block bails out.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_add (const IndexType * cellIndex,
const IndexType * cellStartIndex,
const MDDataItemMask_t mask,
const CoordType * source_coord,
const CoordNoiType * source_coordNoi,
const ScalorType * source_velox,
const ScalorType * source_veloy,
const ScalorType * source_veloz,
const ScalorType * source_forcx,
const ScalorType * source_forcy,
const ScalorType * source_forcz,
const IndexType * source_globalIndex,
const TypeType * source_type,
const ScalorType * source_mass,
const ScalorType * source_charge,
IndexType * numAtomInCell,
CoordType * coord,
CoordNoiType * coordNoi,
ScalorType * velox,
ScalorType * veloy,
ScalorType * veloz,
ScalorType * forcx,
ScalorType * forcy,
ScalorType * forcz,
IndexType * globalIndex,
TypeType * type,
ScalorType * mass,
ScalorType * charge,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// stage the two start indexes through shared memory (single global read)
__shared__ volatile IndexType tmpbuff[2];
if (tid < 2){
tmpbuff[tid] = cellStartIndex[bid+tid];
}
__syncthreads();
IndexType startIdx = tmpbuff[0];
IndexType numAdded = tmpbuff[1] - startIdx;
// IndexType startIdx = cellStartIndex[bid];
// IndexType numAdded = cellStartIndex[bid+1] - startIdx;
if (numAdded == 0) return;
IndexType cellIdx = cellIndex[bid];
// append after the atoms the cell already holds
IndexType alreadyInCell = numAtomInCell[cellIdx];
IndexType toid = tid + cellIdx * blockDim.x + alreadyInCell;
IndexType fromid = tid + startIdx;
// __shared__ IndexType numAtomInThisCell;
__syncthreads();
__shared__ volatile bool failed ;
// thread 0 commits the new counter and flags overflow for the block
if (tid == 0){
if (((numAtomInCell[cellIdx] += numAdded)) > blockDim.x &&
ptr_de != NULL){
*ptr_de = mdErrorShortCellList;
failed = true;
}
else {
failed = false;
}
}
__syncthreads();
if (failed) return;
if (tid < numAdded){
if (mask & MDDataItemMask_Coordinate){
coord[toid] = source_coord[fromid];
}
if (mask & MDDataItemMask_CoordinateNoi){
coordNoi[toid].x = source_coordNoi[fromid].x;
coordNoi[toid].y = source_coordNoi[fromid].y;
coordNoi[toid].z = source_coordNoi[fromid].z;
}
if (mask & MDDataItemMask_Velocity){
velox[toid] = source_velox[fromid];
veloy[toid] = source_veloy[fromid];
veloz[toid] = source_veloz[fromid];
}
if (mask & MDDataItemMask_Force){
forcx[toid] = source_forcx[fromid];
forcy[toid] = source_forcy[fromid];
forcz[toid] = source_forcz[fromid];
}
if (mask & MDDataItemMask_GlobalIndex){
globalIndex[toid] = source_globalIndex[fromid];
}
if (mask & MDDataItemMask_Type){
type[toid] = source_type[fromid];
}
if (mask & MDDataItemMask_Mass){
mass[toid] = source_mass[fromid];
}
if (mask & MDDataItemMask_Charge){
charge[toid] = source_charge[fromid];
}
}
}
// Scatter packed bonded-topology data into the cell list, appending after
// the atoms already present.  Uses oldNumAtomInCell — the counters as
// they were *before* unpackDeviceMDData_add incremented them — to find
// the append offsets.  Strided walks as in the other bondTop kernels:
// 2 neighbor entries per angle, 3 per dihedral.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_bondTop_add (const IndexType * cellIndex,
const IndexType * cellStartIndex,
const IndexType * oldNumAtomInCell,
const MDDataItemMask_t mask,
const IndexType * source_numBond,
const IndexType * source_bondIndex,
const IndexType * source_bondNeighbor_globalIndex,
const IndexType * source_numAngle,
const IndexType * source_angleIndex,
const IndexType * source_anglePosi ,
const IndexType * source_angleNeighbor_globalIndex,
const IndexType * source_numDihedral,
const IndexType * source_dihedralIndex,
const IndexType * source_dihedralPosi ,
const IndexType * source_dihedralNeighbor_globalIndex,
const IndexType source_bondTopStride,
const IndexType maxNumBond,
const IndexType maxNumAngle,
const IndexType maxNumDihedral,
IndexType * numBond,
IndexType * bondIndex,
IndexType * bondNeighbor_globalIndex,
IndexType * numAngle,
IndexType * angleIndex,
IndexType * anglePosi ,
IndexType * angleNeighbor_globalIndex,
IndexType * numDihedral,
IndexType * dihedralIndex,
IndexType * dihedralPosi ,
IndexType * dihedralNeighbor_globalIndex,
const IndexType bondTopStride,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
// stage the two start indexes through shared memory (single global read)
__shared__ volatile IndexType tmpbuff[2];
if (tid < 2){
tmpbuff[tid] = cellStartIndex[bid+tid];
}
__syncthreads();
IndexType startIdx = tmpbuff[0];
IndexType numAdded = tmpbuff[1] - startIdx;
if (numAdded == 0) return;
IndexType cellIdx = cellIndex[bid];
// append after the cell's pre-unpack population
IndexType toid = tid + cellIdx * blockDim.x + oldNumAtomInCell[cellIdx];
IndexType fromid = tid + startIdx;
if (tid < numAdded){
if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numBond[toid] = source_numBond[fromid];
for (IndexType k = 0; k < maxNumBond; ++k){
bondIndex[my_toid] = source_bondIndex[my_fromid];
bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numAngle[toid] = source_numAngle[fromid];
for (IndexType k = 0; k < maxNumAngle; ++k){
angleIndex[my_toid] = source_angleIndex[my_fromid];
anglePosi [my_toid] = source_anglePosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// 2 neighbor entries per angle
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumAngle * 2; ++k){
angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
IndexType my_fromid = fromid;
IndexType my_toid = toid;
numDihedral[toid] = source_numDihedral[fromid];
for (IndexType k = 0; k < maxNumDihedral; ++k){
dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
// 3 neighbor entries per dihedral
my_fromid = fromid;
my_toid = toid;
for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
my_fromid += source_bondTopStride;
my_toid += bondTopStride;
}
}
}
}
void Parallel::DeviceCellListedMDData::
copyToHost (HostCellListedMDData & hdata,
            const MDDataItemMask_t mask) const
{
  // Copy the mask-selected particle data, then the cell-list geometry,
  // then the per-cell atom counters, growing the host side if needed.
  const DeviceMDData & ddata (*this);
  ddata.copyToHost (hdata, mask);
  hdata.rlist = rlist;
  hdata.devideLevel = devideLevel;
  hdata.numCell.x = numCell.x;
  hdata.numCell.y = numCell.y;
  hdata.numCell.z = numCell.z;
  hdata.frameLow.x = frameLow.x;
  hdata.frameLow.y = frameLow.y;
  hdata.frameLow.z = frameLow.z;
  hdata.frameUp.x = frameUp.x;
  hdata.frameUp.y = frameUp.y;
  hdata.frameUp.z = frameUp.z;
  const IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  const size_t size = totalNumCell * sizeof (IndexType);
  if (hdata.HostCellListedMDData::memSize < totalNumCell){
    hdata.easyReallocCell (totalNumCell * MemAllocExtension);
  }
  hipMemcpy (hdata.numAtomInCell, numAtomInCell, size,
             hipMemcpyDeviceToHost);
  checkCUDAError ("DeviceCellListedMDData::copyToHost copy numAtomInCell");
}
void Parallel::DeviceCellListedMDData::
copyFromHost (const HostCellListedMDData & hdata,
const MDDataItemMask_t mask)
{
DeviceMDData & ddata(*this);
ddata.copyFromHost (hdata, mask);
rlist = hdata.rlist;
devideLevel = hdata.devideLevel;
numCell.x = hdata.numCell.x;
numCell.y = hdata.numCell.y;
numCell.z = hdata.numCell.z;
frameUp.x = hdata.frameUp.x;
frameUp.y = hdata.frameUp.y;
frameUp.z = hdata.frameUp.z;
frameLow.x = hdata.frameLow.x;
frameLow.y = hdata.frameLow.y;
frameLow.z = hdata.frameLow.z;
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
size_t size = totalNumCell * sizeof (IndexType);
if (memSize < totalNumCell){
easyMallocCell (totalNumCell * MemAllocExtension);
}
hipMemcpy (numAtomInCell, hdata.numAtomInCell, size,
hipMemcpyHostToDevice);
checkCUDAError ("DeviceCellListedMDData::copyFromHost copy numAtomInCell");
}
void Parallel::DeviceCellListedMDData::
copyFromDevice (const DeviceCellListedMDData & ddata,
const MDDataItemMask_t mask)
{
DeviceMDData & me(*this);
me.copyFromDevice (ddata, mask);
rlist = ddata.rlist;
devideLevel = ddata.devideLevel;
numCell.x = ddata.numCell.x;
numCell.y = ddata.numCell.y;
numCell.z = ddata.numCell.z;
frameUp.x = ddata.frameUp.x;
frameUp.y = ddata.frameUp.y;
frameUp.z = ddata.frameUp.z;
frameLow.x = ddata.frameLow.x;
frameLow.y = ddata.frameLow.y;
frameLow.z = ddata.frameLow.z;
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
size_t size = totalNumCell * sizeof (IndexType);
if (memSize < totalNumCell){
easyMallocCell (totalNumCell * MemAllocExtension);
}
hipMemcpy (numAtomInCell, ddata.numAtomInCell, size,
hipMemcpyDeviceToDevice);
checkCUDAError ("DeviceCellListedMDData::copyFromDevice copy numAtomInCell");
}
void Parallel::DeviceCellListedMDData::
clearData (const SubCellList & subList)
{
  // Reset numAtomInCell to zero for every cell listed in subList.
  IndexType num = subList.size();
  // FIX: an empty sublist previously fell through to malloc(0), which may
  // return NULL and trigger a spurious MDExcptFailedMallocOnHost.
  if (num == 0) return;
  size_t size = sizeof(IndexType) * num;

  // Stage the target cell indexes on the host, then upload them.
  IndexType * tmpList = (IndexType *) malloc (size);
  if (tmpList == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::clearData",
                                     "tmpList", size);
  }
  IndexType * deviceList;
  hipMalloc ((void**)&deviceList, size);
  checkCUDAError ("DeviceCellListedMDData::clearData, malloc deviceList");
  for (IndexType i = 0; i < num; ++i){
    tmpList[i] = subList[i];
  }
  hipMemcpy (deviceList, tmpList, size, hipMemcpyHostToDevice);
  // FIX: this copy was previously unchecked, unlike every other transfer
  // in the file.
  checkCUDAError ("DeviceCellListedMDData::clearData, copy deviceList");
  freeAPointer ((void**)&tmpList);

  // One thread per listed cell zeroes that cell's atom counter.
  hipLaunchKernelGGL(( Parallel::CudaGlobal::clearCellListData)
      , dim3((num + DefaultNThreadPerBlock -1) / DefaultNThreadPerBlock),
      dim3(DefaultNThreadPerBlock), 0, 0,
      deviceList,
      num,
      numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::clearData, clearCellListData");
  hipFree (deviceList);
  checkCUDAError ("DeviceCellListedMDData::clearData, free deviceList");
}
void __global__
Parallel::CudaGlobal::
clearCellListData (const IndexType * deviceList,
                   IndexType num,
                   IndexType * numAtomInCell)
{
  // One thread per entry of deviceList: zero the atom count of that cell.
  const IndexType blockId  = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType entryIdx = threadIdx.x + blockId * blockDim.x;
  if (entryIdx >= num) return;
  numAtomInCell[deviceList[entryIdx]] = 0;
}
__global__ void Parallel::CudaGlobal::
normalizeSystem_CellListed (RectangularBox box,
                            const IndexType * numAtomInCell,
                            CoordType * coord,
                            CoordNoiType * coordNoi)
{
  // One block per cell; threadIdx.x addresses the atom slot inside the cell.
  const IndexType cellId  = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType slot    = threadIdx.x;
  const IndexType atomIdx = slot + cellId * blockDim.x;
  if (slot >= numAtomInCell[cellId]) return;
  // Fold each coordinate component into the primary box image, updating
  // the matching image (noi) counter.
  RectangularBoxGeometry::moveParticleToBox_1image (
      box.size.x, &(coord[atomIdx].x), &(coordNoi[atomIdx].x));
  RectangularBoxGeometry::moveParticleToBox_1image (
      box.size.y, &(coord[atomIdx].y), &(coordNoi[atomIdx].y));
  RectangularBoxGeometry::moveParticleToBox_1image (
      box.size.z, &(coord[atomIdx].z), &(coordNoi[atomIdx].z));
}
void Parallel::DeviceCellListedMDData::
applyPeriodicBondaryCondition ()
{
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
hipLaunchKernelGGL(( Parallel::CudaGlobal::normalizeSystem_CellListed)
, dim3(totalNumCell), dim3(numThreadsInCell), 0, 0,
globalBox,
numAtomInCell,
coord,
coordNoi);
checkCUDAError ("DeviceCellListedMDData::applyPeriodicBondaryCondition");
}
// One block per cell, one thread per atom slot. For cells in the ghost
// layer on a global-box face (only relevant on ranks sitting on the edge
// of the process grid), shift atom coordinates by one box length and
// adjust the image (noi) counters so the ghost copies represent the
// correct periodic image.
__global__ void Parallel::CudaGlobal::
normalizeSystemOnGhost_CellListed (RectangularBox box,
const IndexType * numAtomInCell,
const IntVectorType numCell,
const IndexType divideLevel,
const int nx,
const int ny,
const int nz,
const int rankx,
const int ranky,
const int rankz,
CoordType * coord,
CoordNoiType * coordNoi)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType ii = tid + bid * blockDim.x;
IndexType this_numAtomInCell = numAtomInCell[bid];
// Decode the 1D cell index bid into its 3D cell coordinate (ix, iy, iz).
IndexType ix, iy, iz;
Parallel::CudaDevice::D1toD3 (numCell, bid, ix, iy, iz);
// Low-x ghost layer on the rank at the low-x edge of the process grid:
// wrap forward by one box length.
if (rankx == 0 && ix < divideLevel){
if (tid < this_numAtomInCell){
coord[ii].x += box.size.x;
coordNoi[ii].x -= 1;
}
}
// High-x ghost layer on the rank at the high-x edge: wrap backward.
if (rankx == nx - 1 && ix >= numCell.x - divideLevel){
if (tid < this_numAtomInCell){
coord[ii].x -= box.size.x;
coordNoi[ii].x += 1;
}
}
// Same treatment for the y direction.
if (ranky == 0 && iy < divideLevel){
if (tid < this_numAtomInCell){
coord[ii].y += box.size.y;
coordNoi[ii].y -= 1;
}
}
if (ranky == ny - 1 && iy >= numCell.y - divideLevel){
if (tid < this_numAtomInCell){
coord[ii].y -= box.size.y;
coordNoi[ii].y += 1;
}
}
// Same treatment for the z direction.
if (rankz == 0 && iz < divideLevel){
if (tid < this_numAtomInCell){
coord[ii].z += box.size.z;
coordNoi[ii].z -= 1;
}
}
if (rankz == nz - 1 && iz >= numCell.z - divideLevel){
if (tid < this_numAtomInCell){
coord[ii].z -= box.size.z;
coordNoi[ii].z += 1;
}
}
// if (tid < numAtomInCell[bid]) {
// RectangularBoxGeometry::moveParticleToBox_1image (
// box.size.x, &(coord[ii].x), &(coordNoi[ii].x));
// RectangularBoxGeometry::moveParticleToBox_1image (
// box.size.y, &(coord[ii].y), &(coordNoi[ii].y));
// RectangularBoxGeometry::moveParticleToBox_1image (
// box.size.z, &(coord[ii].z), &(coordNoi[ii].z));
// }
}
void Parallel::DeviceCellListedMDData::
applyPeriodicBondaryConditionOnGhostCells ()
{
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
int Nx, Ny, Nz;
Parallel::Interface::numProcDim (Nx, Ny, Nz);
int rankx, ranky, rankz;
Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(),
rankx, ranky, rankz);
hipLaunchKernelGGL(( Parallel::CudaGlobal::normalizeSystemOnGhost_CellListed)
, dim3(totalNumCell), dim3(numThreadsInCell), 0, 0,
globalBox,
numAtomInCell,
numCell,
devideLevel,
Nx, Ny, Nz,
rankx, ranky, rankz,
coord,
coordNoi);
checkCUDAError ("DeviceCellListedMDData::applyPeriodicBondaryCondition");
}
void Parallel::DeviceCellRelation::
easyMalloc (const IndexType & totalNumCell_,
const IndexType & MaxNeiPerCell_)
{
if (malloced){
clear ();
}
totalNumCell = totalNumCell_;
MaxNeiPerCell = MaxNeiPerCell_;
hipMalloc ((void**)&numNeighbor, totalNumCell * sizeof(IndexType));
hipMalloc ((void**)&neighborCellIndex,
totalNumCell * MaxNeiPerCell * sizeof (IndexType));
hipMalloc ((void**)&neighborShiftNoi,
totalNumCell * MaxNeiPerCell * sizeof (CoordNoiType));
checkCUDAError ("DeviceCellRelation::easyMalloc");
malloced = true;
}
void Parallel::DeviceCellRelation::
clear ()
{
  // Release the device-side relation tables; safe to call repeatedly.
  if (!malloced) return;
  hipFree (numNeighbor);
  hipFree (neighborCellIndex);
  hipFree (neighborShiftNoi);
  malloced = false;
}
Parallel::DeviceCellRelation::
~DeviceCellRelation ()
{
  // Free the device tables (no-op when nothing was allocated).
  this->clear ();
}
// Rebuild, for every cell of `list`, the set of neighboring cells within
// the cutoff rlist. One block per cell is launched with a single thread.
void Parallel::DeviceCellRelation::
rebuild (const DeviceCellListedMDData & list)
{
ptr_list = &list;
IntVectorType numCell = list.getNumCell ();
totalNumCell = numCell.x * numCell.y * numCell.z;
// Upper bound on neighbors: the (2*devideLevel+1)^3 cube around a cell.
MaxNeiPerCell = 2 * list.getDevideLevel() + 1;
MaxNeiPerCell = MaxNeiPerCell * MaxNeiPerCell * MaxNeiPerCell;
int Nx, Ny, Nz;
Parallel::Interface::numProcDim (Nx, Ny, Nz);
easyMalloc (totalNumCell, MaxNeiPerCell);
// The rank's Cartesian position decides which ghost cells sit on the
// global box boundary and therefore need a periodic shift.
int rankx, ranky, rankz;
Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(), rankx, ranky, rankz);
hipLaunchKernelGGL(( Parallel::CudaGlobal::buildCellNeighborhood)
, dim3(toGridDim(totalNumCell)), dim3(1), 0, 0,
numCell,
list.getDevideLevel(),
list.getRlist(),
list.getGlobalBoxSize(),
rankx, ranky, rankz,
Nx, Ny, Nz,
numNeighbor,
neighborCellIndex,
neighborShiftNoi,
MaxNeiPerCell);
checkCUDAError ("DeviceCellRelation::build, buildCellNeighborhood");
}
// Per-cell neighborhood construction without periodic-shift bookkeeping.
// One block per cell; only thread 0 does work. Ghost cells (within
// devideLevel of a face) build no neighbor list.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
const IndexType devideLevel,
const ScalorType rlist,
const HostVectorType boxSize,
IndexType * numNeighbor,
IndexType * neighborCellIndex,
const IndexType stride)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
__shared__ bool stop;
numNeighbor[bid] = 0;
// center{x,y,z} are only assigned/read by thread 0.
int centerx, centery, centerz;
// "one cell" per direction: 1 real cell plus 2 ghost layers -> numCell == 3.
bool oneCellX(false), oneCellY(false), oneCellZ(false);
if (numCell.x == 3) oneCellX = true;
if (numCell.y == 3) oneCellY = true;
if (numCell.z == 3) oneCellZ = true;
if (tid == 0){
stop = false;
Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
// Skip ghost cells: they do not own a neighbor list.
if (oneCellX){
if (centerx == 0 || centerx == 2){
stop = true;
}
}
else {
if (centerx < devideLevel || centerx >= numCell.x - devideLevel){
stop = true;
}
}
if (oneCellY){
if (centery == 0 || centery == 2){
stop = true;
}
}
else {
if (centery < devideLevel || centery >= numCell.y - devideLevel){
stop = true;
}
}
if (oneCellZ){
if (centerz == 0 || centerz == 2){
stop = true;
}
}
else {
if (centerz < devideLevel || centerz >= numCell.z - devideLevel){
stop = true;
}
}
}
__syncthreads();
if (stop) return;
if (tid == 0){
// Half-open scan window [lower, upper) around the center cell.
int lowerX (-devideLevel);
int lowerY (-devideLevel);
int lowerZ (-devideLevel);
if (oneCellX) lowerX = -1;
if (oneCellY) lowerY = -1;
if (oneCellZ) lowerZ = -1;
int upperX (devideLevel+1);
int upperY (devideLevel+1);
int upperZ (devideLevel+1);
if (oneCellX) upperX = 2;
if (oneCellY) upperY = 2;
if (oneCellZ) upperZ = 2;
// scalor{x,y,z}: edge length of one cell in each direction.
ScalorType scalorx, scalory, scalorz;
oneCellX ? scalorx = boxSize.x :
scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
oneCellY ? scalory = boxSize.y :
scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
oneCellZ ? scalorz = boxSize.z :
scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
ScalorType rlist2 = rlist * rlist;
for (int ix = lowerX; ix < upperX; ++ix){
for (int iy = lowerY; iy < upperY; ++iy){
for (int iz = lowerZ; iz < upperZ; ++iz){
int myx = ix + int(centerx);
int myy = iy + int(centery);
int myz = iz + int(centerz);
// Minimum corner-to-corner squared distance over the 27 offsets.
ScalorType min = 1e9;
#pragma unroll 27
for (int dx = -1; dx <= 1; ++dx){
for (int dy = -1; dy <= 1; ++dy){
for (int dz = -1; dz <= 1; ++dz){
ScalorType diffx ((-centerx + myx + dx) * scalorx);
ScalorType diffy ((-centery + myy + dy) * scalory);
ScalorType diffz ((-centerz + myz + dz) * scalorz);
// shortestImage (box, &diffx, &diffy, &diffz);
ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
if (diff2 < min){
min = diff2;
}
}
}
}
if (min < rlist2){
IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell, myx, myy, myz);
neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
}
}
}
}
}
}
// Per-cell neighborhood construction with periodic-shift (noi) bookkeeping
// for ghost cells on the global box boundary. One block per cell; only
// thread 0 does work. Ghost cells build no neighbor list.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
                       const IndexType devideLevel,
                       const ScalorType rlist,
                       const HostVectorType globalBoxSize,
                       const int rankx,
                       const int ranky,
                       const int rankz,
                       const int nProcDimx,
                       const int nProcDimy,
                       const int nProcDimz,
                       IndexType * numNeighbor,
                       IndexType * neighborCellIndex,
                       CoordNoiType * neighborShiftNoi,
                       const IndexType stride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  __shared__ bool stop;
  numNeighbor[bid] = 0;
  // center{x,y,z} are only assigned/read by thread 0.
  int centerx, centery, centerz;
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (numCell.x == 3) oneCellX = true;
  if (numCell.y == 3) oneCellY = true;
  if (numCell.z == 3) oneCellZ = true;
  // BUGFIX: boxSize was declared as dim3, whose members are unsigned int,
  // so the per-process box lengths were truncated to whole numbers before
  // entering the distance computation. Use the floating-point vector type
  // (as the commented-out variant of this kernel already did).
  HostVectorType boxSize;
  if (tid == 0){
    // Per-process sub-box size.
    boxSize.x = globalBoxSize.x / nProcDimx;
    boxSize.y = globalBoxSize.y / nProcDimy;
    boxSize.z = globalBoxSize.z / nProcDimz;
    stop = false;
    Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
    // Skip ghost cells: they do not own a neighbor list.
    if (oneCellX){
      if (centerx == 0 || centerx == 2){
        stop = true;
      }
    }
    else {
      if (centerx < devideLevel || centerx >= numCell.x - devideLevel){
        stop = true;
      }
    }
    if (oneCellY){
      if (centery == 0 || centery == 2){
        stop = true;
      }
    }
    else {
      if (centery < devideLevel || centery >= numCell.y - devideLevel){
        stop = true;
      }
    }
    if (oneCellZ){
      if (centerz == 0 || centerz == 2){
        stop = true;
      }
    }
    else {
      if (centerz < devideLevel || centerz >= numCell.z - devideLevel){
        stop = true;
      }
    }
  }
  __syncthreads();
  if (stop) return;
  if (tid == 0){
    // Half-open scan window [lower, upper) around the center cell.
    int lowerX (-devideLevel);
    int lowerY (-devideLevel);
    int lowerZ (-devideLevel);
    if (oneCellX) lowerX = -1;
    if (oneCellY) lowerY = -1;
    if (oneCellZ) lowerZ = -1;
    int upperX (devideLevel+1);
    int upperY (devideLevel+1);
    int upperZ (devideLevel+1);
    if (oneCellX) upperX = 2;
    if (oneCellY) upperY = 2;
    if (oneCellZ) upperZ = 2;
    // scalor{x,y,z}: edge length of one cell in each direction.
    ScalorType scalorx, scalory, scalorz;
    oneCellX ? scalorx = boxSize.x :
	scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
    oneCellY ? scalory = boxSize.y :
	scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
    oneCellZ ? scalorz = boxSize.z :
	scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
    ScalorType rlist2 = rlist * rlist;
    CoordNoiType myshift ;
    for (int ix = lowerX; ix < upperX; ++ix){
      for (int iy = lowerY; iy < upperY; ++iy){
	for (int iz = lowerZ; iz < upperZ; ++iz){
	  int myx = ix + int(centerx);
	  int myy = iy + int(centery);
	  int myz = iz + int(centerz);
	  // Periodic shift for neighbors that sit in a ghost layer across
	  // the global box boundary (only on edge ranks).
	  myshift.x = myshift.y = myshift.z = 0;
	  if (rankx == 0 && myx < devideLevel) {
	    myshift.x = - 1;
	  }
	  else if (rankx == nProcDimx - 1 && myx >= int(numCell.x - devideLevel)){
	    myshift.x = 1;
	  }
	  if (ranky == 0 && myy < devideLevel) {
	    myshift.y = - 1;
	  }
	  else if (ranky == nProcDimy - 1 && myy >= int(numCell.y - devideLevel)){
	    myshift.y = 1;
	  }
	  if (rankz == 0 && myz < devideLevel) {
	    myshift.z = - 1;
	  }
	  else if (rankz == nProcDimz - 1 && myz >= int(numCell.z - devideLevel)){
	    myshift.z = 1;
	  }
	  // Minimum corner-to-corner squared distance over the 27 offsets.
	  ScalorType min = 1e9;
#pragma unroll 27
	  for (int dx = -1; dx <= 1; ++dx){
	    for (int dy = -1; dy <= 1; ++dy){
	      for (int dz = -1; dz <= 1; ++dz){
		ScalorType diffx ((-centerx + myx + dx) * scalorx);
		ScalorType diffy ((-centery + myy + dy) * scalory);
		ScalorType diffz ((-centerz + myz + dz) * scalorz);
		ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
		if (diff2 < min){
		  min = diff2;
		}
	      }
	    }
	  }
	  if (min < rlist2){
	    IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell, myx, myy, myz);
	    neighborShiftNoi [(numNeighbor[bid]  ) + bid * stride] = myshift;
	    neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
	  }
	}
      }
    }
  }
}
void Parallel::DeviceCellRelation::
copyToHost (HostCellRelation & hrelation)
{
  // Size the host tables, then download counts, neighbor indexes and shifts.
  hrelation.easyMalloc (totalNumCell, MaxNeiPerCell);
  const size_t countBytes = sizeof(IndexType) * totalNumCell;
  const size_t indexBytes = sizeof(IndexType) * totalNumCell * MaxNeiPerCell;
  const size_t shiftBytes = sizeof(CoordNoiType) * totalNumCell * MaxNeiPerCell;
  hipMemcpy (hrelation.cptr_numNeighborCell(),
             numNeighbor,
             countBytes,
             hipMemcpyDeviceToHost);
  hipMemcpy (hrelation.cptr_neighborCellIndex(),
             neighborCellIndex,
             indexBytes,
             hipMemcpyDeviceToHost);
  hipMemcpy (hrelation.cptr_neighborShiftNoi(),
             neighborShiftNoi,
             shiftBytes,
             hipMemcpyDeviceToHost);
  checkCUDAError ("DeviceCellRelation::copyToHost");
}
void Parallel::DeviceCellRelation::
rebuild (const DeviceCellListedMDData & list,
const SubCellList & sub0,
const SubCellList & sub1)
{
IndexType * tmpHost;
IndexType * deviceSub0;
IndexType * deviceSub1;
size_t size0 = sizeof (IndexType) * sub0.size();
size_t size1 = sizeof (IndexType) * sub1.size();
tmpHost = (IndexType *) malloc (size0);
if (tmpHost == NULL){
throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::build",
"tmpHost", size0);
}
hipMalloc ((void**)&deviceSub0, size0);
checkCUDAError ("DeviceCellListedMDData::build, malloc deviceSub0");
for (IndexType i = 0; i < sub0.size(); ++i){
tmpHost[i] = sub0[i];
}
hipMemcpy (deviceSub0, tmpHost, size0, hipMemcpyHostToDevice);
checkCUDAError ("DeviceCellListedMDData::build, copy to deviceSub0");
free (tmpHost);
tmpHost = (IndexType *) malloc (size1);
if (tmpHost == NULL){
throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::build",
"tmpHost", size1);
}
hipMalloc ((void**)&deviceSub1, size1);
checkCUDAError ("DeviceCellListedMDData::build, malloc deviceSub1");
for (IndexType i = 0; i < sub1.size(); ++i){
tmpHost[i] = sub1[i];
}
hipMemcpy (deviceSub1, tmpHost, size1, hipMemcpyHostToDevice);
checkCUDAError ("DeviceCellListedMDData::build, copy to deviceSub1");
free (tmpHost);
ptr_list = & list;
IntVectorType numCell = list.getNumCell ();
totalNumCell = numCell.x * numCell.y * numCell.z;
MaxNeiPerCell = 2 * list.getDevideLevel() + 1;
MaxNeiPerCell = MaxNeiPerCell * MaxNeiPerCell * MaxNeiPerCell;
int Nx, Ny, Nz;
Parallel::Interface::numProcDim (Nx, Ny, Nz);
easyMalloc (totalNumCell, MaxNeiPerCell);
int rankx, ranky, rankz;
Parallel::Interface::rankToCartCoord
(Parallel::Interface::myRank(), rankx, ranky, rankz);
checkCUDAError ("DeviceCellRelation::build, buildCellNeighborhood");
hipLaunchKernelGGL(( Parallel::Auxiliary::setValue)
, dim3(toGridDim(totalNumCell / DefaultNThreadPerBlock + 1)),
dim3(DefaultNThreadPerBlock), 0, 0,
numNeighbor, totalNumCell, IndexType(0));
hipLaunchKernelGGL(( Parallel::CudaGlobal::buildCellNeighborhood)
, dim3(sub0.size()), dim3(1), 0, 0,
numCell,
list.getDevideLevel(),
list.getRlist(),
list.getGlobalBoxSize(),
rankx, ranky, rankz,
Nx, Ny, Nz,
deviceSub0,
sub0.size(),
deviceSub1,
sub1.size(),
numNeighbor,
neighborCellIndex,
neighborShiftNoi,
MaxNeiPerCell);
hipFree(deviceSub0);
hipFree(deviceSub1);
checkCUDAError ("DeviceCellListedMDData::build, cuda free");
}
// Sub-list variant: block bid handles cell subList0[bid] and scans every
// cell of subList1, recording those within rlist (plus the periodic shift
// for boundary ranks). Launched with a single thread per block.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
		       const IndexType devideLevel,
		       const ScalorType rlist,
		       const HostVectorType globalBoxSize,
		       const int rankx,
		       const int ranky,
		       const int rankz,
		       const int nProcDimx,
		       const int nProcDimy,
		       const int nProcDimz,
		       const IndexType * subList0,
		       const IndexType length0,
		       const IndexType * subList1,
		       const IndexType length1,
		       IndexType * numNeighbor,
		       IndexType * neighborCellIndex,
		       CoordNoiType * neighborShiftNoi,
		       const IndexType stride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  if (bid >= length0) return ;
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (numCell.x == 3) oneCellX = true;
  if (numCell.y == 3) oneCellY = true;
  if (numCell.z == 3) oneCellZ = true;
  // BUGFIX: boxSize was declared as dim3, whose members are unsigned int,
  // so the per-process box lengths were truncated to whole numbers before
  // entering the distance computation. Use the floating-point vector type.
  HostVectorType boxSize;
  // scalor{x,y,z}: edge length of one cell; assigned and used by tid 0 only.
  ScalorType scalorx, scalory, scalorz;
  if (tid == 0){
    boxSize.x = globalBoxSize.x / nProcDimx;
    boxSize.y = globalBoxSize.y / nProcDimy;
    boxSize.z = globalBoxSize.z / nProcDimz;
    oneCellX ? scalorx = boxSize.x :
	scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
    oneCellY ? scalory = boxSize.y :
	scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
    oneCellZ ? scalorz = boxSize.z :
	scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
  }
  IndexType my_cellIndex = subList0[bid];
  IntScalorType my_cellIndexx, my_cellIndexy, my_cellIndexz;
  if (tid == 0){
    Parallel::CudaDevice::D1toD3 (numCell,
				  int(my_cellIndex),
				  my_cellIndexx, my_cellIndexy, my_cellIndexz);
    ScalorType rlist2 = rlist * rlist;
    for (IndexType ii = 0; ii < length1; ++ii){
      IndexType target_cellIndex = subList1[ii];
      IntScalorType target_cellIndexx, target_cellIndexy, target_cellIndexz;
      Parallel::CudaDevice::D1toD3 (numCell,
				    int(target_cellIndex),
				    target_cellIndexx,
				    target_cellIndexy,
				    target_cellIndexz);
      // Cheap reject: outside the devideLevel cube around my cell.
      if (abs(target_cellIndexx - my_cellIndexx) > devideLevel ||
	  abs(target_cellIndexy - my_cellIndexy) > devideLevel ||
	  abs(target_cellIndexz - my_cellIndexz) > devideLevel ){
	continue;
      }
      // Periodic shift for targets in a ghost layer on the global boundary.
      CoordNoiType myshift ;
      myshift.x = myshift.y = myshift.z = 0;
      if (rankx == 0 &&
	  target_cellIndexx < devideLevel) {
	myshift.x = - 1;
      }
      else if (rankx == nProcDimx - 1 &&
	       target_cellIndexx >= int(numCell.x - devideLevel)){
	myshift.x = 1;
      }
      if (ranky == 0 &&
	  target_cellIndexy < devideLevel) {
	myshift.y = - 1;
      }
      else if (ranky == nProcDimy - 1 &&
	       target_cellIndexy >= int(numCell.y - devideLevel)){
	myshift.y = 1;
      }
      if (rankz == 0 &&
	  target_cellIndexz < devideLevel) {
	myshift.z = - 1;
      }
      else if (rankz == nProcDimz - 1 &&
	       target_cellIndexz >= int(numCell.z - devideLevel)){
	myshift.z = 1;
      }
      // Minimum corner-to-corner squared distance over the 27 offsets.
      ScalorType min = 1e9;
      for (int dx = -1; dx <= 1; ++dx){
	for (int dy = -1; dy <= 1; ++dy){
	  for (int dz = -1; dz <= 1; ++dz){
	    ScalorType diffx ((-my_cellIndexx + target_cellIndexx + dx) * scalorx);
	    ScalorType diffy ((-my_cellIndexy + target_cellIndexy + dy) * scalory);
	    ScalorType diffz ((-my_cellIndexz + target_cellIndexz + dz) * scalorz);
	    ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
	    if (diff2 < min){
	      min = diff2;
	    }
	  }
	}
      }
      if (min < rlist2){
	neighborShiftNoi [(numNeighbor[my_cellIndex]  ) + my_cellIndex * stride]
	    = myshift;
	neighborCellIndex[(numNeighbor[my_cellIndex]++) + my_cellIndex * stride]
	    = target_cellIndex;
      }
    }
  }
}
// __global__ void Parallel::CudaGlobal::
// buildCellNeighborhood (const IntVectorType numCell,
// const IndexType devideLevel,
// const ScalorType rlist,
// const HostVectorType globalBoxSize,
// const int rankx,
// const int ranky,
// const int rankz,
// const int nProcDimx,
// const int nProcDimy,
// const int nProcDimz,
// const IndexType * subList0,
// const IndexType length0,
// const IndexType * subList1,
// const IndexType length1,
// IndexType * numNeighbor,
// IndexType * neighborCellIndex,
// const IndexType stride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// if (bid >= length0) return ;
// int centerx, centery, centerz;
// if (tid == 0){
// HostVectorType boxSize;
// boxSize.x = globalBoxSize.x / nProcDimx;
// boxSize.y = globalBoxSize.y / nProcDimy;
// boxSize.z = globalBoxSize.z / nProcDimz;
// ScalorType rlist2 = rlist;
// numNeighbor[bid] = 0;
// Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
// bool oneCellX(false), oneCellY(false), oneCellZ(false);
// if (numCell.x == 3) oneCellX = true;
// if (numCell.y == 3) oneCellY = true;
// if (numCell.z == 3) oneCellZ = true;
// ScalorType scalorx, scalory, scalorz;
// oneCellX ? scalorx = boxSize.x :
// scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
// oneCellY ? scalory = boxSize.y :
// scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
// oneCellZ ? scalorz = boxSize.z :
// scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
// int targetx, targety, targetz;
// IndexType targetIndex;
// for (IndexType i = 0; i < length1; ++i){
// targetIndex = subList1[i];
// Parallel::CudaDevice::D1toD3 (numCell, int(targetIndex),
// targetx, targety, targetz);
// ScalorType min = 1e9;
// #pragma unroll 27
// for (int dx = -1; dx <= 1; ++dx){
// for (int dy = -1; dy <= 1; ++dy){
// for (int dz = -1; dz <= 1; ++dz){
// ScalorType diffx ((-centerx + targetx + dx) * scalorx);
// ScalorType diffy ((-centery + targety + dy) * scalory);
// ScalorType diffz ((-centerz + targetz + dz) * scalorz);
// // shortestImage (box, &diffx, &diffy, &diffz);
// ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
// if (diff2 < min){
// min = diff2;
// }
// }
// }
// }
// if (min < rlist2){
// IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell,
// targetx, targety, targetz);
// neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
// }
// }
// }
// }
__global__ void Parallel::CudaGlobal::
rescaleCoordinate (const IndexType * numAtomInCell,
                   const HostVectorType scale,
                   CoordType * coord)
{
  // One block per cell: scale each resident atom's coordinate componentwise.
  const IndexType cellId  = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx = threadIdx.x + cellId * blockDim.x;
  if (threadIdx.x >= numAtomInCell[cellId]) return;
  coord[atomIdx].x *= scale.x;
  coord[atomIdx].y *= scale.y;
  coord[atomIdx].z *= scale.z;
}
__global__ void Parallel::CudaGlobal::
rescaleVelocity (const IndexType * numAtomInCell,
                 const HostVectorType scale,
                 ScalorType * velox,
                 ScalorType * veloy,
                 ScalorType * veloz)
{
  // One block per cell: scale each resident atom's velocity componentwise.
  const IndexType cellId  = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType atomIdx = threadIdx.x + cellId * blockDim.x;
  if (threadIdx.x >= numAtomInCell[cellId]) return;
  velox[atomIdx] *= scale.x;
  veloy[atomIdx] *= scale.y;
  veloz[atomIdx] *= scale.z;
}
// Scale all atom coordinates componentwise and rescale the box geometry
// (global box and local frame) to match.
void Parallel::DeviceCellListedMDData::
rescaleCoordinate (const HostVectorType & scale)
{
HostVectorType globalBoxSize = getGlobalBoxSize();
IntVectorType numCell = getNumCell();
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
dim3 gridDim = toGridDim (totalNumCell);
// One block per cell; blockDim equals the per-cell atom capacity.
hipLaunchKernelGGL(( Parallel::CudaGlobal::rescaleCoordinate)
, dim3(gridDim), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
numAtomInCell,
scale,
coord);
checkCUDAError ("DeviceCellListedMDData::rescaleCoordinate");
// Keep the box description consistent with the rescaled coordinates.
globalBoxSize.x *= scale.x;
globalBoxSize.y *= scale.y;
globalBoxSize.z *= scale.z;
setGlobalBox (globalBoxSize.x, globalBoxSize.y, globalBoxSize.z);
frameLow.x *= scale.x;
frameLow.y *= scale.y;
frameLow.z *= scale.z;
frameUp.x *= scale.x;
frameUp.y *= scale.y;
frameUp.z *= scale.z;
}
// Scale all atom velocities componentwise; the box is left untouched.
void Parallel::DeviceCellListedMDData::
rescaleVelocity (const HostVectorType & scale)
{
HostVectorType globalBoxSize = getGlobalBoxSize();
IntVectorType numCell = getNumCell();
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
dim3 gridDim = toGridDim (totalNumCell);
// One block per cell; blockDim equals the per-cell atom capacity.
hipLaunchKernelGGL(( Parallel::CudaGlobal::rescaleVelocity)
, dim3(gridDim), dim3(Parallel::Interface::numThreadsInCell()), 0, 0,
numAtomInCell,
scale,
velox, veloy, veloz);
checkCUDAError ("DeviceCellListedMDData::rescaleVelocity");
}
void Parallel::DeviceCellListedMDData::
mallocFromHost (const HostCellListedMDData & hdata)
{
DeviceMDData::mallocFromHost (hdata);
rlist = hdata.getRlist();
devideLevel = hdata.getDevideLevel();
numCell.x = hdata.getNumCell().x;
numCell.y = hdata.getNumCell().y;
numCell.z = hdata.getNumCell().z;
frameLow.x = hdata.getFrameLow().x;
frameLow.y = hdata.getFrameLow().y;
frameLow.z = hdata.getFrameLow().z;
frameUp.x = hdata.getFrameUp().x;
frameUp.y = hdata.getFrameUp().y;
frameUp.z = hdata.getFrameUp().z;
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
easyMallocCell (totalNumCell);
hipLaunchKernelGGL(( setValue) , dim3(totalNumCell / DefaultNThreadPerBlock + 1), dim3(DefaultNThreadPerBlock), 0, 0,
numAtomInCell, totalNumCell, IndexType(0));
}
void Parallel::DeviceCellListedMDData::
mallocToHost (HostCellListedMDData & hdata) const
{
  // Allocate matching host buffers and copy the cell geometry; the host
  // atom counters start at zero (no atom data is transferred here).
  DeviceMDData::mallocToHost (hdata);
  hdata.rlist       = rlist;
  hdata.devideLevel = devideLevel;
  hdata.frameLow.x = frameLow.x;
  hdata.frameLow.y = frameLow.y;
  hdata.frameLow.z = frameLow.z;
  hdata.frameUp.x = frameUp.x;
  hdata.frameUp.y = frameUp.y;
  hdata.frameUp.z = frameUp.z;
  hdata.numCell.x = numCell.x;
  hdata.numCell.y = numCell.y;
  hdata.numCell.z = numCell.z;
  const IndexType nCell = numCell.x * numCell.y * numCell.z;
  hdata.easyReallocCell (nCell);
  for (IndexType cellIdx = 0; cellIdx < nCell; ++cellIdx){
    hdata.numAtomInCell[cellIdx] = 0;
  }
}
void Parallel::DeviceCellListedMDData::
mallocFromDevice (const DeviceCellListedMDData & ddata)
{
DeviceMDData::mallocFromDevice (ddata);
rlist = ddata.getRlist();
devideLevel = ddata.getDevideLevel();
numCell.x = ddata.getNumCell().x;
numCell.y = ddata.getNumCell().y;
numCell.z = ddata.getNumCell().z;
frameLow.x = ddata.getFrameLow().x;
frameLow.y = ddata.getFrameLow().y;
frameLow.z = ddata.getFrameLow().z;
frameUp.x = ddata.getFrameUp().x;
frameUp.y = ddata.getFrameUp().y;
frameUp.z = ddata.getFrameUp().z;
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
easyMallocCell (totalNumCell);
hipLaunchKernelGGL(( setValue) , dim3(totalNumCell / DefaultNThreadPerBlock + 1), dim3(DefaultNThreadPerBlock), 0, 0,
numAtomInCell, totalNumCell, IndexType(0));
}
| 33710460db38d4d33ac7329d203d7168fb080932.cu | #define DEVICE_CODE
#include "Parallel_CellList.h"
#include "Parallel_Interface.h"
#include "Parallel_CellList_device.h"
#include "Parallel_Algorithm.h"
#include "Parallel_BondList.h"
#include "Auxiliary.h"
#include "Parallel_Timer.h"
#include "Parallel_Interface.h"
#include "Parallel_Auxiliary.h"
#include "compile_error_mixcode.h"
// Zero the per-cell atom counters; one block per cell.
void Parallel::DeviceCellListedMDData::
initZeroCell ()
{
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell();
dim3 gridDim = toGridDim(numCell.x*numCell.y*numCell.z);
Parallel::CudaGlobal::initZeroCell
<<<gridDim, numThreadsInCell>>>(
numCell,
numAtomInCell);
checkCUDAError ("DeviceCellListedMDData::initZeroCell");
}
// Compute this rank's cell-grid geometry: the number of cells per direction
// (including ghost layers), and the local frame bounds extended by one cell
// on each side. Throws MDExcptCellList if any cell-listed direction would
// have fewer than 3 cells.
void Parallel::DeviceCellListedMDData::
calNumCell (const ScalorType & input_rlist,
const IndexType & input_dividelevel,
const BoxDirection_t & bdir,
IntVectorType & result_numCell,
VectorType & result_frameLow,
VectorType & result_frameUp)
{
int Nx, Ny, Nz;
Parallel::Interface::numProcDim (Nx, Ny, Nz);
int ix, iy, iz;
Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(), ix, iy, iz);
// Size of this rank's sub-box: global box divided by the process grid.
double dx, dy, dz;
dx = getGlobalBoxSize().x / double(Nx);
dy = getGlobalBoxSize().y / double(Ny);
dz = getGlobalBoxSize().z / double(Nz);
result_frameLow.x = dx * ix;
result_frameLow.y = dy * iy;
result_frameLow.z = dz * iz;
result_frameUp.x = result_frameLow.x + dx;
result_frameUp.y = result_frameLow.y + dy;
result_frameUp.z = result_frameLow.z + dz;
// bdir selects which directions actually use cell listing.
bool CellOnX, CellOnY, CellOnZ;
CellOnX = bdir & RectangularBoxGeometry::mdRectBoxDirectionX;
CellOnY = bdir & RectangularBoxGeometry::mdRectBoxDirectionY;
CellOnZ = bdir & RectangularBoxGeometry::mdRectBoxDirectionZ;
// Number of cells per direction: floor(sub-box length / rlist).
double input_rlisti = 1./input_rlist;
if (CellOnX ) result_numCell.x = int ( floor(dx * input_rlisti) );
else result_numCell.x = 1;
if (CellOnY ) result_numCell.y = int ( floor(dy * input_rlisti) );
else result_numCell.y = 1;
if (CellOnZ ) result_numCell.z = int ( floor(dz * input_rlisti) );
else result_numCell.z = 1;
if ((CellOnX && result_numCell.x < 3) ||
(CellOnY && result_numCell.y < 3) ||
(CellOnZ && result_numCell.z < 3) ){
printf("input_rlist is %f, nx %d ny %d nz %d\n", input_rlist, result_numCell.x, result_numCell.y, result_numCell.z);
throw MDExcptCellList ("Number of cell on one direction is less than 3");
}
// add ghost cell
// Extend the frame by one cell on each side and account for it in the
// cell counts, then apply the sub-division level.
VectorType dcell;
dcell.x = (result_frameUp.x - result_frameLow.x) / result_numCell.x;
dcell.y = (result_frameUp.y - result_frameLow.y) / result_numCell.y;
dcell.z = (result_frameUp.z - result_frameLow.z) / result_numCell.z;
result_frameUp.x += dcell.x;
result_frameUp.y += dcell.y;
result_frameUp.z += dcell.z;
result_frameLow.x -= dcell.x;
result_frameLow.y -= dcell.y;
result_frameLow.z -= dcell.z;
result_numCell.x += 2;
result_numCell.y += 2;
result_numCell.z += 2;
if (CellOnX) result_numCell.x *= input_dividelevel;
if (CellOnY) result_numCell.y *= input_dividelevel;
if (CellOnZ) result_numCell.z *= input_dividelevel;
}
// Establish the cell-list layout for the current data: compute the cell
// grid, re-allocate storage so each cell owns a fixed number of atom slots,
// and sort the existing atoms into their cells on the device.
void Parallel::DeviceCellListedMDData::
initCellStructure (const ScalorType & rlist_,
const IndexType & devideLevel_,
const BoxDirection_t & bdir)
{
rlist = rlist_;
devideLevel = devideLevel_;
calNumCell (rlist, devideLevel, bdir, numCell, frameLow, frameUp);
// Keep an unsorted backup of the present data to redistribute from.
DeviceMDData bkData (*this);
IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
// maxNumNeighborCell = 1;
// if (CellOnX) maxNumNeighborCell *= devideLevel * 2 + 1;
// if (CellOnY) maxNumNeighborCell *= devideLevel * 2 + 1;
// if (CellOnZ) maxNumNeighborCell *= devideLevel * 2 + 1;
// Each cell owns numThreadsInCell atom slots; grow storage if needed.
if (numThreadsInCell * totalNumCell > DeviceMDData::memSize()){
DeviceMDData::easyMalloc (numThreadsInCell * totalNumCell);
}
numData() = totalNumCell * numThreadsInCell;
// printf ("rank %d, numcell %d\n", Parallel::Interface::myRank(), totalNumCell);
// getchar ();
// mallocCell (totalNumCell, maxNumNeighborCell);
easyMallocCell (totalNumCell);
initZeroCell ();
// One block per cell: scatter the backed-up atoms into cell-major order.
IndexType numThreadBlock = numThreadsInCell;
dim3 gridDim = toGridDim(numCell.x*numCell.y*numCell.z);
Parallel::CudaGlobal::formCellStructure
<<<gridDim, numThreadBlock >>>(
frameLow,
frameUp,
numCell,
numAtomInCell,
bkData.numData(),
bkData.dptr_coordinate(),
bkData.dptr_coordinateNoi(),
bkData.dptr_velocityX(),
bkData.dptr_velocityY(),
bkData.dptr_velocityZ(),
bkData.dptr_globalIndex(),
bkData.dptr_type(),
bkData.dptr_mass(),
bkData.dptr_charge(),
coord,
coordNoi,
velox,
veloy,
veloz,
globalIndex,
type,
mass,
charge,
err.ptr_de);
checkCUDAError ("DeviceCellListedMDData::formCellStructure");
err.updateHost();
err.check ("DeviceCellListedMDData::formCellSturcture");
}
// Rebuild the cell structure for a new rlist / devide level.  Returns
// false (only updating rlist/devideLevel) when the resulting cell grid is
// identical to the current one; otherwise reorganizes all per-atom data
// and the bonded topology into the new layout and returns true.
bool Parallel::DeviceCellListedMDData::
reinitCellStructure (const ScalorType & rlist_,
		     const IndexType & devideLevel_,
		     const BoxDirection_t & bdir)
{
  IntVectorType tmpNumCell;
  VectorType tmpFrameLow, tmpFrameUp;
  calNumCell (rlist_, devideLevel_, bdir, tmpNumCell, tmpFrameLow, tmpFrameUp);
  // Same grid as before: nothing to move, just record the new parameters.
  if (tmpNumCell.x == numCell.x &&
      tmpNumCell.y == numCell.y &&
      tmpNumCell.z == numCell.z ){
    rlist = rlist_;
    devideLevel = devideLevel_;
    return false;
  }
  // Full device-side backup (atoms + topology); the kernels below read
  // from the backup while writing into this object's rebuilt arrays.
  DeviceCellListedMDData bkData ;
  bkData.copyFromDevice (*this, MDDataItemMask_All);
  rlist = rlist_;
  devideLevel = devideLevel_;
  numCell = tmpNumCell;
  frameLow = tmpFrameLow;
  frameUp = tmpFrameUp;
  IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  // Grow atom storage for the new grid if necessary (keeps topology sizes).
  if (numThreadsInCell * totalNumCell > DeviceMDData::memSize()){
    DeviceMDData::easyMalloc (numThreadsInCell * totalNumCell,
			      getMaxNumBond(), getMaxNumAngle(), getMaxNumDihedral());
  }
  numData() = totalNumCell * numThreadsInCell;
  easyMallocCell (totalNumCell);
  initZeroCell ();
  IndexType numThreadBlock = numThreadsInCell;
  // Launch over the OLD cell grid: each block scans one old cell.
  IntVectorType numCell_ = bkData.getNumCell();
  dim3 gridDim = toGridDim(numCell_.x*numCell_.y*numCell_.z);
  // forwardMap[oldFlatIndex] -> new flat index in the rebuilt cell list.
  IndexType * forwardMap;
  cudaMalloc ((void**)&forwardMap,
	      sizeof(IndexType) * numCell_.x * numCell_.y * numCell_.z * numThreadsInCell);
  checkCUDAError ("DeviceCellListedMDData::reinitCellStructure malloc forwardMap");
  // Pass 1: compute the new slot of every atom and fill numAtomInCell.
  Parallel::CudaGlobal::reinitCellStructure_calForwardMap
      <<<gridDim, numThreadBlock >>>(
	  bkData.dptr_numAtomInCell(),
	  bkData.dptr_coordinate(),
	  frameLow,
	  frameUp,
	  numCell,
	  numAtomInCell,
	  forwardMap,
	  err.ptr_de);
  checkCUDAError ("DeviceCellListedMDData::reinitCellStructure_calForwardMap");
  err.updateHost();
  err.check ("DeviceCellListedMDData::reinitCellStructure_calForwardMap");
  // Pass 2: move all per-atom properties through the forward map.
  Parallel::CudaGlobal::reinitCellStructure_step1
      <<<gridDim, numThreadBlock >>>(
	  bkData.dptr_numAtomInCell(),
	  bkData.dptr_coordinate(),
	  bkData.dptr_coordinateNoi(),
	  bkData.dptr_velocityX(),
	  bkData.dptr_velocityY(),
	  bkData.dptr_velocityZ(),
	  bkData.dptr_forceX(),
	  bkData.dptr_forceY(),
	  bkData.dptr_forceZ(),
	  bkData.dptr_globalIndex(),
	  bkData.dptr_type(),
	  bkData.dptr_mass(),
	  bkData.dptr_charge(),
	  forwardMap,
	  coord,
	  coordNoi,
	  velox,
	  veloy,
	  veloz,
	  forcx,
	  forcy,
	  forcz,
	  globalIndex,
	  type,
	  mass,
	  charge);
  checkCUDAError ("DeviceCellListedMDData::reinitCellStructure_step1");
  // Pass 3: move the bond/angle/dihedral tables through the forward map.
  Parallel::CudaGlobal::reinitCellStructure_step2
      <<<gridDim, numThreadBlock>>>(
	  bkData.dptr_numAtomInCell(),
	  bkData.getMaxNumBond(),
	  bkData.dptr_numBond(),
	  bkData.dptr_bondIndex(),
	  bkData.dptr_bondNeighbor_globalIndex(),
	  bkData.getMaxNumAngle(),
	  bkData.dptr_numAngle(),
	  bkData.dptr_angleIndex(),
	  bkData.dptr_anglePosi(),
	  bkData.dptr_angleNeighbor_globalIndex(),
	  bkData.getMaxNumDihedral(),
	  bkData.dptr_numDihedral(),
	  bkData.dptr_dihedralIndex(),
	  bkData.dptr_dihedralPosi(),
	  bkData.dptr_dihedralNeighbor_globalIndex(),
	  bkData.getBondTopStride(),
	  forwardMap,
	  dptr_numBond(),
	  dptr_bondIndex(),
	  dptr_bondNeighbor_globalIndex(),
	  dptr_numAngle(),
	  dptr_angleIndex(),
	  dptr_anglePosi(),
	  dptr_angleNeighbor_globalIndex(),
	  dptr_numDihedral(),
	  dptr_dihedralIndex(),
	  dptr_dihedralPosi(),
	  dptr_dihedralNeighbor_globalIndex(),
	  getBondTopStride());
  cudaFree (forwardMap);
  return true;
}
// Re-sort atoms into their correct cells after they have moved.
// step1 migrates atoms whose coordinate now maps to a different cell;
// step2 compacts each cell so the surviving atoms occupy the leading
// slots.  When bonded topology is present, the *_mapBondTop kernels
// apply the same atom permutation (forwardMap_step1 / forwardMap_step2)
// to the bond/angle/dihedral tables.
void Parallel::DeviceCellListedMDData::
rebuild ()
{
  IndexType numThreadBlock = Parallel::Interface::numThreadsInCell();
  IndexType totalNumCell = numCell.x*numCell.y*numCell.z;
  dim3 gridDim = toGridDim(totalNumCell);
  // Snapshot the per-cell atom counts: the kernels must distinguish the
  // original occupants of a cell from atoms migrated into it by step1.
  IndexType * bk_numAtomInCell;
  cudaMalloc ((void**)&bk_numAtomInCell, totalNumCell * sizeof(IndexType));
  cudaMemcpy (bk_numAtomInCell, numAtomInCell, totalNumCell * sizeof(IndexType),
	      cudaMemcpyDeviceToDevice);
  checkCUDAError ("DeviceCellListedMDData::rebuild malloc backup");
  if (getMaxNumBond() == 0 &&
      getMaxNumAngle() == 0 &&
      getMaxNumDihedral() == 0){
    // No bonded topology: plain migrate + compact.
    Parallel::CudaGlobal::rebuildCellList_step1
	<<<gridDim, numThreadBlock>>> (
	    frameLow,
	    frameUp,
	    numCell,
	    bk_numAtomInCell,
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    // Shared memory: 3 * blockDim IndexTypes (index buffer + sort scratch).
    Parallel::CudaGlobal::rebuildCellList_step2
	<<<gridDim, numThreadBlock, numThreadBlock*sizeof(IndexType)*3>>> (
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
  }
  else{
    // With topology: record forward maps so the topology tables can be
    // permuted alongside the atoms.
    Parallel::CudaGlobal::rebuildCellList_step1
	<<< gridDim, numThreadBlock>>> (
	    frameLow,
	    frameUp,
	    numCell,
	    bk_numAtomInCell,
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    forwardMap_step1,
	    err.ptr_de,
	    err.ptr_dindex,
	    err.ptr_dscalor);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    Parallel::CudaGlobal::rebuildCellList_step1_mapBondTop
	<<<gridDim, numThreadBlock>>> (
	    forwardMap_step1,
	    bk_numAtomInCell,
	    getMaxNumBond(),
	    dptr_numBond(),
	    dptr_bondIndex(),
	    dptr_bondNeighbor_globalIndex(),
	    getMaxNumAngle(),
	    dptr_numAngle(),
	    dptr_angleIndex(),
	    dptr_anglePosi(),
	    dptr_angleNeighbor_globalIndex(),
	    getMaxNumDihedral(),
	    dptr_numDihedral(),
	    dptr_dihedralIndex(),
	    dptr_dihedralPosi(),
	    // BUG FIX: was dptr_angleNeighbor_globalIndex() (copy-paste);
	    // this slot is the dihedral neighbor table.
	    dptr_dihedralNeighbor_globalIndex(),
	    bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step1");
    // Refresh the snapshot: step2 compacts relative to the post-migration
    // occupancy produced by step1.
    cudaMemcpy (bk_numAtomInCell, dptr_numAtomInCell(),
		totalNumCell * sizeof(IndexType), cudaMemcpyDeviceToDevice);
    Parallel::CudaGlobal::rebuildCellList_step2
	<<<gridDim, numThreadBlock, numThreadBlock*sizeof(IndexType)*3>>> (
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    forwardMap_step2,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
    Parallel::CudaGlobal::rebuildCellList_step2_mapBondTop
	<<<gridDim, numThreadBlock>>> (
	    forwardMap_step2,
	    bk_numAtomInCell,
	    getMaxNumBond(),
	    dptr_numBond(),
	    dptr_bondIndex(),
	    dptr_bondNeighbor_globalIndex(),
	    getMaxNumAngle(),
	    dptr_numAngle(),
	    dptr_angleIndex(),
	    dptr_anglePosi(),
	    dptr_angleNeighbor_globalIndex(),
	    getMaxNumDihedral(),
	    dptr_numDihedral(),
	    dptr_dihedralIndex(),
	    dptr_dihedralPosi(),
	    // BUG FIX: was dptr_angleNeighbor_globalIndex() (copy-paste);
	    // this slot is the dihedral neighbor table.
	    dptr_dihedralNeighbor_globalIndex(),
	    bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step2");
  }
  cudaFree (bk_numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::rebuild free backup");
}
// Re-sort atoms into their correct cells, additionally keeping the
// bond-list's local-index tables (dbdlist) consistent with the atom
// permutation.  Structure mirrors rebuild(): step1 migrates, step2
// compacts, and the *_mapBondTop kernels permute the topology tables.
void Parallel::DeviceCellListedMDData::
rebuild (DeviceBondList & dbdlist)
{
  IndexType numThreadBlock = Parallel::Interface::numThreadsInCell();
  IndexType totalNumCell = numCell.x*numCell.y*numCell.z;
  dim3 gridDim = toGridDim(totalNumCell);
  // Snapshot of the per-cell atom counts taken before migration.
  IndexType * bk_numAtomInCell;
  cudaMalloc ((void**)&bk_numAtomInCell, totalNumCell * sizeof(IndexType));
  cudaMemcpy (bk_numAtomInCell, numAtomInCell, totalNumCell * sizeof(IndexType),
	      cudaMemcpyDeviceToDevice);
  checkCUDAError ("DeviceCellListedMDData::rebuild malloc backup");
  if (getMaxNumBond() == 0 &&
      getMaxNumAngle() == 0 &&
      getMaxNumDihedral() == 0){
    // No bonded topology: plain migrate + compact.
    Parallel::CudaGlobal::rebuildCellList_step1
	<<<gridDim, numThreadBlock>>> (
	    frameLow,
	    frameUp,
	    numCell,
	    bk_numAtomInCell,
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    Parallel::CudaGlobal::rebuildCellList_step2
	<<<gridDim, numThreadBlock, numThreadBlock*sizeof(IndexType)*3>>> (
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
  }
  else {
    Parallel::CudaGlobal::rebuildCellList_step1
	<<< gridDim, numThreadBlock>>> (
	    frameLow,
	    frameUp,
	    numCell,
	    bk_numAtomInCell,
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    forwardMap_step1,
	    err.ptr_de,
	    err.ptr_dindex,
	    err.ptr_dscalor);
    checkCUDAError ("DeviceCellListedMDData::rebuild step1");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step1");
    Parallel::CudaGlobal::rebuildCellList_step1_mapBondTop
	<<<gridDim, numThreadBlock>>> (
	    forwardMap_step1,
	    bk_numAtomInCell,
	    getMaxNumBond(),
	    dptr_numBond(),
	    dptr_bondIndex(),
	    dptr_bondNeighbor_globalIndex(),
	    dbdlist.dptr_bondNeighbor_localIndex(),
	    getMaxNumAngle(),
	    dptr_numAngle(),
	    dptr_angleIndex(),
	    dptr_anglePosi(),
	    dptr_angleNeighbor_globalIndex(),
	    dbdlist.dptr_angleNeighbor_localIndex(),
	    getMaxNumDihedral(),
	    dptr_numDihedral(),
	    dptr_dihedralIndex(),
	    dptr_dihedralPosi(),
	    // BUG FIX: was dptr_angleNeighbor_globalIndex() (copy-paste);
	    // the kernel expects the dihedral neighbor table here (see its
	    // dihedralNeighbor_globalIndex parameter).
	    dptr_dihedralNeighbor_globalIndex(),
	    dbdlist.dptr_dihedralNeighbor_localIndex(),
	    bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step1");
    // Refresh the snapshot for the compaction step.
    cudaMemcpy (bk_numAtomInCell, dptr_numAtomInCell(),
		totalNumCell * sizeof(IndexType), cudaMemcpyDeviceToDevice);
    Parallel::CudaGlobal::rebuildCellList_step2
	<<<gridDim, numThreadBlock, numThreadBlock*sizeof(IndexType)*3>>> (
	    numAtomInCell,
	    coord,
	    coordNoi,
	    velox,
	    veloy,
	    veloz,
	    forcx,
	    forcy,
	    forcz,
	    globalIndex,
	    type,
	    mass,
	    charge,
	    forwardMap_step2,
	    err.ptr_de);
    checkCUDAError ("DeviceCellListedMDData::rebuild step2");
    err.updateHost();
    err.check ("DeviceCellListedMDData::rebuild step2");
    Parallel::CudaGlobal::rebuildCellList_step2_mapBondTop
	<<<gridDim, numThreadBlock>>> (
	    forwardMap_step2,
	    bk_numAtomInCell,
	    getMaxNumBond(),
	    dptr_numBond(),
	    dptr_bondIndex(),
	    dptr_bondNeighbor_globalIndex(),
	    dbdlist.dptr_bondNeighbor_localIndex(),
	    getMaxNumAngle(),
	    dptr_numAngle(),
	    dptr_angleIndex(),
	    dptr_anglePosi(),
	    dptr_angleNeighbor_globalIndex(),
	    dbdlist.dptr_angleNeighbor_localIndex(),
	    getMaxNumDihedral(),
	    dptr_numDihedral(),
	    dptr_dihedralIndex(),
	    dptr_dihedralPosi(),
	    // BUG FIX: was dptr_angleNeighbor_globalIndex() (copy-paste);
	    // this slot is the dihedral neighbor table.
	    dptr_dihedralNeighbor_globalIndex(),
	    dbdlist.dptr_dihedralNeighbor_localIndex(),
	    bondTopStride());
    checkCUDAError ("DeviceCellListedMDData::rebuild map top step2");
  }
  cudaFree (bk_numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::rebuild free backup");
}
// Zero the per-cell atom counters.  Launch with any 1D/2D grid whose
// total thread count covers numCell.x * numCell.y * numCell.z entries;
// excess threads are idle.
__global__ void Parallel::CudaGlobal::
initZeroCell (const IntVectorType numCell,
	      IndexType * numAtomInCell)
{
  // Flatten the (possibly 2D) grid and the 1D block into one global index.
  const IndexType blockId  = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType globalId = threadIdx.x + blockId * blockDim.x;
  const IndexType nCellTotal = numCell.x * numCell.y * numCell.z;
  // Each in-range thread clears exactly one cell's counter.
  if (globalId < nCellTotal) numAtomInCell[globalId] = 0;
}
// Scatter flat (un-celled) atom data into the cell list.  Thread ii
// handles backup atom ii: its target cell is derived from its coordinate,
// a slot in that cell is claimed with atomicAdd, and all per-atom
// properties are copied into the claimed slot.
// Errors reported via *ptr_de (only when ptr_de != NULL):
//   mdErrorOverFlowCellIdx - a coordinate maps outside the cell grid;
//   mdErrorShortCellList   - a cell received more than blockDim.x atoms.
__global__ void Parallel::CudaGlobal::
formCellStructure (const VectorType frameLow,
		   const VectorType frameUp,
		   const IntVectorType numCell,
		   IndexType * numAtomInCell,
		   const IndexType numAtom,
		   const CoordType * bk_coord,
		   const CoordNoiType * bk_coordNoi,
		   const ScalorType * bk_velox,
		   const ScalorType * bk_veloy,
		   const ScalorType * bk_veloz,
		   const IndexType * bk_globalIndex,
		   const TypeType * bk_type,
		   const ScalorType * bk_mass,
		   const ScalorType * bk_charge,
		   CoordType * coord,
		   CoordNoiType * coordNoi,
		   ScalorType * velox,
		   ScalorType * veloy,
		   ScalorType * veloz,
		   IndexType * globalIndex,
		   TypeType * type,
		   ScalorType * mass,
		   ScalorType * charge,
		   mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // Inverse cell edge lengths, computed once per block in shared memory
  // (threads 0..2 each fill one component; assumes blockDim.x >= 3).
  __shared__ ScalorType dcellxi;
  __shared__ ScalorType dcellyi;
  __shared__ ScalorType dcellzi;
  if (tid == 0) {
    dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
  }
  if (tid == 1){
    dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
  }
  if (tid == 2){
    dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
  }
  __syncthreads();
  IndexType targetIndex;
  if (ii < numAtom){
    IndexType targetCellx, targetCelly, targetCellz;
    targetCellx = IndexType((bk_coord[ii].x - frameLow.x) * dcellxi);
    targetCelly = IndexType((bk_coord[ii].y - frameLow.y) * dcellyi);
    targetCellz = IndexType((bk_coord[ii].z - frameLow.z) * dcellzi);
    // NOTE(review): when ptr_de == NULL no range check is performed at
    // all and an out-of-grid atom would corrupt memory -- intended as
    // "checks only when error reporting is enabled"; confirm callers
    // always pass a non-NULL ptr_de.
    if (ptr_de != NULL &&
	(targetCellx >= numCell.x ||
	 targetCelly >= numCell.y ||
	 targetCellz >= numCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      return;
    }
    IndexType cellid = CudaDevice::D3toD1
	(numCell, targetCellx, targetCelly, targetCellz);
    IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
    if (pid >= blockDim.x){
      // BUG FIX: guard ptr_de before dereferencing -- it may be NULL,
      // as the overflow check above assumes.  Fall back to slot 0 to
      // stay inside the allocation.
      if (ptr_de != NULL) *ptr_de = mdErrorShortCellList;
      pid = 0;
    }
    targetIndex = pid + cellid * blockDim.x;
    coord[targetIndex] = bk_coord[ii];
    coordNoi[targetIndex].x = bk_coordNoi[ii].x;
    coordNoi[targetIndex].y = bk_coordNoi[ii].y;
    coordNoi[targetIndex].z = bk_coordNoi[ii].z;
    velox[targetIndex] = bk_velox[ii];
    veloy[targetIndex] = bk_veloy[ii];
    veloz[targetIndex] = bk_veloz[ii];
    globalIndex[targetIndex] = bk_globalIndex[ii];
    type[targetIndex] = bk_type[ii];
    mass[targetIndex] = bk_mass[ii];
    charge[targetIndex] = bk_charge[ii];
  }
}
// For every atom of the OLD cell list, compute its slot in the NEW cell
// grid: atomically bump numAtomInCell[newCell] and record the resulting
// flat index in forwardMap[oldIndex].  One block per old cell; threads
// beyond the cell's occupancy do nothing.
// Errors mirror formCellStructure (overflow cell index / short cell list),
// reported only when ptr_de != NULL.
__global__ void Parallel::CudaGlobal::
reinitCellStructure_calForwardMap (const IndexType * bk_numAtomInCell,
				   const CoordType * bk_coord,
				   const VectorType frameLow,
				   const VectorType frameUp,
				   const IntVectorType numCell,
				   IndexType * numAtomInCell,
				   IndexType * forwardMap,
				   mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  IndexType this_numAtom = bk_numAtomInCell[bid];
  if (this_numAtom == 0) return;
  // Inverse cell edge lengths of the new grid (per-thread registers).
  ScalorType dcellxi;
  ScalorType dcellyi;
  ScalorType dcellzi;
  dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
  dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
  dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
  if (tid < this_numAtom){
    IndexType targetCellx, targetCelly, targetCellz;
    targetCellx = IndexType((bk_coord[ii].x - frameLow.x) * dcellxi);
    targetCelly = IndexType((bk_coord[ii].y - frameLow.y) * dcellyi);
    targetCellz = IndexType((bk_coord[ii].z - frameLow.z) * dcellzi);
    if (ptr_de != NULL &&
	(targetCellx >= numCell.x ||
	 targetCelly >= numCell.y ||
	 targetCellz >= numCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      return;
    }
    IndexType cellid = CudaDevice::D3toD1
	(numCell, targetCellx, targetCelly, targetCellz);
    IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
    if (pid >= blockDim.x){
      // BUG FIX: guard ptr_de before dereferencing (may be NULL, as the
      // overflow check above assumes).  Fall back to slot 0.
      if (ptr_de != NULL) *ptr_de = mdErrorShortCellList;
      pid = 0;
    }
    IndexType targetIndex = pid + cellid * blockDim.x;
    forwardMap[ii] = targetIndex;
  }
}
// Move all per-atom properties from the old cell layout into the new one
// through forwardMap (as computed by reinitCellStructure_calForwardMap).
// One block per OLD cell; thread `tid` copies old atom slot tid of that
// cell to its new flat index.
__global__ void Parallel::CudaGlobal::
reinitCellStructure_step1 (const IndexType * bk_numAtomInCell,
			   const CoordType * bk_coord,
			   const CoordNoiType * bk_coordNoi,
			   const ScalorType * bk_velox,
			   const ScalorType * bk_veloy,
			   const ScalorType * bk_veloz,
			   const ScalorType * bk_forcx,
			   const ScalorType * bk_forcy,
			   const ScalorType * bk_forcz,
			   const IndexType * bk_globalIndex,
			   const TypeType * bk_type,
			   const ScalorType * bk_mass,
			   const ScalorType * bk_charge,
			   const IndexType * forwardMap,
			   CoordType * coord,
			   CoordNoiType * coordNoi,
			   ScalorType * velox,
			   ScalorType * veloy,
			   ScalorType * veloz,
			   ScalorType * forcx,
			   ScalorType * forcy,
			   ScalorType * forcz,
			   IndexType * globalIndex,
			   TypeType * type,
			   ScalorType * mass,
			   ScalorType * charge)
{
  const IndexType cellIdx = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType lane = threadIdx.x;
  const IndexType srcIdx = lane + cellIdx * blockDim.x;
  const IndexType atomsHere = bk_numAtomInCell[cellIdx];
  // Empty cells and lanes beyond the cell's occupancy have nothing to do.
  if (atomsHere == 0 || lane >= atomsHere) return;
  const IndexType dstIdx = forwardMap[srcIdx];
  // Gather from the backup arrays, scatter into the rebuilt layout.
  coord[dstIdx] = bk_coord[srcIdx];
  coordNoi[dstIdx].x = bk_coordNoi[srcIdx].x;
  coordNoi[dstIdx].y = bk_coordNoi[srcIdx].y;
  coordNoi[dstIdx].z = bk_coordNoi[srcIdx].z;
  velox[dstIdx] = bk_velox[srcIdx];
  veloy[dstIdx] = bk_veloy[srcIdx];
  veloz[dstIdx] = bk_veloz[srcIdx];
  forcx[dstIdx] = bk_forcx[srcIdx];
  forcy[dstIdx] = bk_forcy[srcIdx];
  forcz[dstIdx] = bk_forcz[srcIdx];
  globalIndex[dstIdx] = bk_globalIndex[srcIdx];
  type[dstIdx] = bk_type[srcIdx];
  mass[dstIdx] = bk_mass[srcIdx];
  charge[dstIdx] = bk_charge[srcIdx];
}
// Move the bonded-topology tables (bond / angle / dihedral) from the old
// cell layout into the new one through forwardMap.  One block per OLD
// cell; thread tid handles old atom slot tid.  Tables are stored
// column-major with stride bondTopStride (bk_bondTopStride for the
// source), so each entry j of an atom lives at index + j*stride.
__global__ void Parallel::CudaGlobal::
reinitCellStructure_step2 (const IndexType * bk_numAtomInCell,
			   const IndexType bk_maxNumBond,
			   const IndexType * bk_numBond,
			   const IndexType * bk_bondIndex,
			   const IndexType * bk_bondNeighbor_globalIndex,
			   const IndexType bk_maxNumAngle,
			   const IndexType * bk_numAngle,
			   const IndexType * bk_angleIndex,
			   const IndexType * bk_anglePosi,
			   const IndexType * bk_angleNeighbor_globalIndex,
			   const IndexType bk_maxNumDihedral,
			   const IndexType * bk_numDihedral,
			   const IndexType * bk_dihedralIndex,
			   const IndexType * bk_dihedralPosi,
			   const IndexType * bk_dihedralNeighbor_globalIndex,
			   const IndexType bk_bondTopStride,
			   const IndexType * forwardMap,
			   IndexType * numBond,
			   IndexType * bondIndex,
			   IndexType * bondNeighbor_globalIndex,
			   IndexType * numAngle,
			   IndexType * angleIndex,
			   IndexType * anglePosi,
			   IndexType * angleNeighbor_globalIndex,
			   IndexType * numDihedral,
			   IndexType * dihedralIndex,
			   IndexType * dihedralPosi,
			   IndexType * dihedralNeighbor_globalIndex,
			   const IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  IndexType this_numAtom = bk_numAtomInCell[bid];
  if (this_numAtom == 0) return;
  if (tid < this_numAtom){
    IndexType my_targetIndex = forwardMap[ii];
    if (bk_maxNumBond != 0){
      // Each bond carries one index and one neighbor entry.
      IndexType tmpNum = numBond[my_targetIndex] = bk_numBond[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
	bondIndex[tmp_target] = bk_bondIndex[tmp_src];
	bondNeighbor_globalIndex[tmp_target] = bk_bondNeighbor_globalIndex[tmp_src];
	tmp_target += bondTopStride;
	tmp_src += bk_bondTopStride;
      }
    }
    if (bk_maxNumAngle != 0){
      IndexType tmpNum = numAngle[my_targetIndex] = bk_numAngle[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
	angleIndex[tmp_target] = bk_angleIndex[tmp_src];
	anglePosi [tmp_target] = bk_anglePosi [tmp_src];
	tmp_target += bondTopStride;
	tmp_src += bk_bondTopStride;
      }
      // Each angle has 2 neighbor entries.
      tmp_target = my_targetIndex;
      tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum * 2; ++jj){
	// BUG FIX: was copying bondNeighbor_globalIndex (copy-paste); the
	// angle neighbor table must be moved here (the angle parameters
	// were otherwise never used).
	angleNeighbor_globalIndex[tmp_target] = bk_angleNeighbor_globalIndex[tmp_src];
	tmp_target += bondTopStride;
	tmp_src += bk_bondTopStride;
      }
    }
    if (bk_maxNumDihedral != 0){
      IndexType tmpNum = numDihedral[my_targetIndex] = bk_numDihedral[ii];
      IndexType tmp_target = my_targetIndex;
      IndexType tmp_src = ii;
      for (IndexType jj = 0; jj < tmpNum; ++jj){
	dihedralIndex[tmp_target] = bk_dihedralIndex[tmp_src];
	dihedralPosi [tmp_target] = bk_dihedralPosi [tmp_src];
	tmp_target += bondTopStride;
	tmp_src += bk_bondTopStride;
      }
      tmp_target = my_targetIndex;
      tmp_src = ii;
      // NOTE(review): loop bound kept at tmpNum * 2 as in the original --
      // confirm whether dihedrals store 2 or 3 neighbor entries each.
      for (IndexType jj = 0; jj < tmpNum * 2; ++jj){
	// BUG FIX: was copying bondNeighbor_globalIndex (copy-paste); the
	// dihedral neighbor table must be moved here.
	dihedralNeighbor_globalIndex[tmp_target] = bk_dihedralNeighbor_globalIndex[tmp_src];
	tmp_target += bondTopStride;
	tmp_src += bk_bondTopStride;
      }
    }
  }
}
// Migration pass of the cell-list rebuild (no-topology overload).  One
// block per cell; thread tid re-derives the cell of atom tid from its
// coordinate.  If the atom now belongs to a different cell it is copied
// into a fresh slot of that cell (claimed with atomicAdd on
// numAtomInCell) and its old slot is invalidated by setting
// globalIndex[ii] = MaxIndexValue.  New arrivals always land beyond the
// target cell's original occupancy, so they never collide with the slots
// still being read here.  Step2 later compacts the invalidated slots.
// Errors via *ptr_de (only when ptr_de != NULL): overflow cell index /
// short cell list (same semantics as formCellStructure).
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1 (const VectorType frameLow,
		       const VectorType frameUp,
		       const IntVectorType numCell,
		       const IndexType * bk_numAtomInCell,
		       IndexType * numAtomInCell,
		       CoordType * coord,
		       CoordNoiType * coordNoi,
		       ScalorType * velox,
		       ScalorType * veloy,
		       ScalorType * veloz,
		       ScalorType * forcx,
		       ScalorType * forcy,
		       ScalorType * forcz,
		       IndexType * globalIndex,
		       TypeType * type,
		       ScalorType * mass,
		       ScalorType * charge,
		       mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // Inverse cell edge lengths, computed once per block in shared memory
  // (threads 0..2 each fill one component; assumes blockDim.x >= 3).
  __shared__ ScalorType dcellxi;
  __shared__ ScalorType dcellyi;
  __shared__ ScalorType dcellzi;
  if (tid == 0) {
    dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
  }
  if (tid == 1){
    dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
  }
  if (tid == 2){
    dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
  }
  __syncthreads();
  if (tid < bk_numAtomInCell[bid]){
    IndexType targetCellx, targetCelly, targetCellz;
    targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
    targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
    targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
    if (ptr_de != NULL &&
	(targetCellx >= numCell.x ||
	 targetCelly >= numCell.y ||
	 targetCellz >= numCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      return;
    }
    IndexType cellid = CudaDevice::D3toD1
	(numCell, targetCellx, targetCelly, targetCellz);
    if (cellid != bid){
      IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
      if (pid >= blockDim.x){
	// BUG FIX: guard ptr_de before dereferencing (may be NULL, as
	// the overflow check above assumes).  Fall back to slot 0.
	if (ptr_de != NULL) *ptr_de = mdErrorShortCellList;
	pid = 0;
      }
      IndexType targetIndex = pid + cellid * blockDim.x;
      coord[targetIndex] = coord[ii];
      coordNoi[targetIndex].x = coordNoi[ii].x;
      coordNoi[targetIndex].y = coordNoi[ii].y;
      coordNoi[targetIndex].z = coordNoi[ii].z;
      velox[targetIndex] = velox[ii];
      veloy[targetIndex] = veloy[ii];
      veloz[targetIndex] = veloz[ii];
      forcx[targetIndex] = forcx[ii];
      forcy[targetIndex] = forcy[ii];
      forcz[targetIndex] = forcz[ii];
      globalIndex[targetIndex] = globalIndex[ii];
      // Invalidate the vacated slot; step2 recognizes MaxIndexValue.
      globalIndex[ii] = MaxIndexValue;
      type[targetIndex] = type[ii];
      mass[targetIndex] = mass[ii];
      charge[targetIndex] = charge[ii];
    }
  }
  return;
}
// Block-wide partition of `index` by its highest bit (the bit that is
// set in MaxIndexValue markers): entries with the top bit set are moved
// behind the rest, and the number of such entries is returned.  Expects
// blockDim.x entries in `index` and at least 2*blockDim.x scratch slots
// in `sbuff`; must be called by ALL threads of the block (contains
// __syncthreads()).
// NOTE(review): correctness of the target-slot formulas relies on
// sumVectorBlockBuffer() leaving per-thread prefix sums in sbuff while
// returning the block total -- confirm against its definition.
static __device__ IndexType
headSort (volatile IndexType * index,
	  volatile IndexType * sbuff)
{
  IndexType k = NUintBit - 1;   // bit position of the highest bit
  IndexType tid = threadIdx.x;
  // Flag = 1 when this entry's top bit is set (i.e. a MaxIndexValue marker).
  sbuff[tid] = getKthBit(index[tid], k);
  sbuff[tid+blockDim.x] = 0;
  __syncthreads();
  // total1 = number of flagged entries in the whole block.
  IndexType total1 = sumVectorBlockBuffer (sbuff, blockDim.x);
  IndexType target, mydata = index[tid];
  __syncthreads();
  if (getKthBit(index[tid], k)) {
    // Flagged entries are placed at the back of the array.
    target = blockDim.x - sbuff[tid];
  }
  else {
    // Unflagged entries are compacted to the front, order preserved.
    target = tid + sbuff[tid] - total1;
  }
  __syncthreads();
  index[target] = mydata;
  __syncthreads();
  return total1;
}
// Compaction pass of the cell-list rebuild (no-topology overload).  One
// block per cell.  Slots invalidated by step1 (globalIndex ==
// MaxIndexValue) or beyond the cell's occupancy are pushed to the back
// with headSort; the valid atoms are then gathered into the leading
// slots and numAtomInCell[bid] is set to the surviving count.
// Requires dynamic shared memory of blockDim.x * sizeof(IndexType) * 3
// (1x for the index buffer + 2x headSort scratch).
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2 (IndexType * numAtomInCell,
		       CoordType * coord,
		       CoordNoiType * coordNoi,
		       ScalorType * velox,
		       ScalorType * veloy,
		       ScalorType * veloz,
		       ScalorType * forcx,
		       ScalorType * forcy,
		       ScalorType * forcz,
		       IndexType * globalIndex,
		       TypeType * type,
		       ScalorType * mass,
		       ScalorType * charge,
		       mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // IndexType k = NUintBit - 1;
  extern __shared__ volatile IndexType sbuff[];
  volatile IndexType * myIndex = (volatile IndexType * )sbuff;
  // Mark invalid slots with MaxIndexValue (top bit set) so headSort
  // moves them behind the valid entries.
  if (tid >= numAtomInCell[bid] || globalIndex[ii] == MaxIndexValue){
    myIndex[tid] = MaxIndexValue;
  }
  else {
    myIndex[tid] = tid;
  }
  __syncthreads();
  // headSort returns the number of invalid entries; the valid count is
  // the remainder of the block.
  IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
  total = blockDim.x - total;
  // Two-phase move: buffer the source atom in registers, barrier, then
  // write -- so in-cell permutations cannot overwrite data still unread.
  IndexType fromId;
  CoordType bk_coord;
  CoordNoiType bk_coordNoi;
  ScalorType bk_velox, bk_veloy, bk_veloz;
  ScalorType bk_forcx, bk_forcy, bk_forcz;
  IndexType bk_globalIndex;
  TypeType bk_type;
  ScalorType bk_mass;
  ScalorType bk_charge;
  if (tid < total){
    fromId = myIndex[tid] + bid * blockDim.x;
    if (ii != fromId){
      bk_coord = coord[fromId];
      bk_coordNoi.x = coordNoi[fromId].x;
      bk_coordNoi.y = coordNoi[fromId].y;
      bk_coordNoi.z = coordNoi[fromId].z;
      bk_velox = velox[fromId];
      bk_veloy = veloy[fromId];
      bk_veloz = veloz[fromId];
      bk_forcx = forcx[fromId];
      bk_forcy = forcy[fromId];
      bk_forcz = forcz[fromId];
      bk_globalIndex = globalIndex[fromId];
      bk_type = type[fromId];
      bk_mass = mass[fromId];
      bk_charge = charge[fromId];
    }
  }
  __syncthreads();
  if (tid < total && ii != fromId){
    coord[ii] = bk_coord;
    coordNoi[ii].x = bk_coordNoi.x;
    coordNoi[ii].y = bk_coordNoi.y;
    coordNoi[ii].z = bk_coordNoi.z;
    velox[ii] = bk_velox;
    veloy[ii] = bk_veloy;
    veloz[ii] = bk_veloz;
    forcx[ii] = bk_forcx;
    forcy[ii] = bk_forcy;
    forcz[ii] = bk_forcz;
    globalIndex[ii] = bk_globalIndex;
    type[ii] = bk_type;
    mass[ii] = bk_mass;
    charge[ii] = bk_charge;
  }
  // else {
  // globalIndex[ii] = MaxIndexValue;
  // }
  if (tid == 0){
    numAtomInCell[bid] = total;
  }
}
// Migration pass of the cell-list rebuild, forwardMap overload: same
// migration as the plain rebuildCellList_step1, but additionally records
// forwardMap[oldIndex] = new flat index (or MaxIndexValue for unused
// slots) so the bonded-topology tables can be permuted afterwards, and
// writes extra diagnostics (erridx / errsrc) on an overflow error.
// Errors via *ptr_de only when ptr_de != NULL.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1 (const VectorType frameLow,
		       const VectorType frameUp,
		       const IntVectorType numCell,
		       const IndexType * bk_numAtomInCell,
		       IndexType * numAtomInCell,
		       CoordType * coord,
		       CoordNoiType * coordNoi,
		       ScalorType * velox,
		       ScalorType * veloy,
		       ScalorType * veloz,
		       ScalorType * forcx,
		       ScalorType * forcy,
		       ScalorType * forcz,
		       IndexType * globalIndex,
		       TypeType * type,
		       ScalorType * mass,
		       ScalorType * charge,
		       IndexType * forwardMap,
		       mdError_t * ptr_de,
		       IndexType * erridx,
		       ScalorType * errsrc)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // Inverse cell edge lengths, computed once per block by thread 0.
  __shared__ ScalorType dcellxi;
  __shared__ ScalorType dcellyi;
  __shared__ ScalorType dcellzi;
  if (tid == 0) {
    dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
    dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
    dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
  }
  // Default: slot unused / atom dropped.  Overwritten below for atoms
  // that stay valid.
  forwardMap[ii] = MaxIndexValue;
  __syncthreads();
  bool inRange = tid < bk_numAtomInCell[bid];
  if (inRange){
    IndexType targetCellx, targetCelly, targetCellz;
    targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
    targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
    targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
    if (ptr_de != NULL &&
	(targetCellx >= numCell.x ||
	 targetCelly >= numCell.y ||
	 targetCellz >= numCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      // Diagnostic dump of the offending coordinate/direction.  If
      // several threads fail at once, the last writer wins.
      if (targetCellx >= numCell.x){
	*erridx = targetCellx;
	*errsrc = coord[ii].x;
	errsrc[1] = 0;
	errsrc[2] = frameLow.x;
	errsrc[3] = dcellxi;
	errsrc[4] = numCell.x;
      }
      if (targetCelly >= numCell.y) {
	*erridx = targetCelly;
	*errsrc = coord[ii].y;
	errsrc[1] = 1;
	errsrc[2] = frameLow.y;
	errsrc[3] = dcellyi;
	errsrc[4] = numCell.y;
      }
      if (targetCellz >= numCell.z) {
	*erridx = targetCellz;
	*errsrc = coord[ii].z;
	errsrc[1] = 2;
	errsrc[2] = frameLow.z;
	errsrc[3] = dcellzi;
	errsrc[4] = numCell.z;
      }
      return;
    }
    IndexType cellid = CudaDevice::D3toD1
	(numCell, targetCellx, targetCelly, targetCellz);
    // Atom stays valid: identity mapping unless it migrates below.
    forwardMap[ii] = ii;
    if (cellid != bid){
      IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
      if (pid >= blockDim.x){
	// BUG FIX: guard ptr_de before dereferencing (may be NULL, as
	// the overflow check above assumes).  Fall back to slot 0.
	if (ptr_de != NULL) *ptr_de = mdErrorShortCellList;
	pid = 0;
      }
      IndexType targetIndex = pid + cellid * blockDim.x;
      coord[targetIndex] = coord[ii];
      coordNoi[targetIndex].x = coordNoi[ii].x;
      coordNoi[targetIndex].y = coordNoi[ii].y;
      coordNoi[targetIndex].z = coordNoi[ii].z;
      velox[targetIndex] = velox[ii];
      veloy[targetIndex] = veloy[ii];
      veloz[targetIndex] = veloz[ii];
      forcx[targetIndex] = forcx[ii];
      forcy[targetIndex] = forcy[ii];
      forcz[targetIndex] = forcz[ii];
      globalIndex[targetIndex] = globalIndex[ii];
      // Invalidate the vacated slot; step2 recognizes MaxIndexValue.
      globalIndex[ii] = MaxIndexValue;
      type[targetIndex] = type[ii];
      mass[targetIndex] = mass[ii];
      charge[targetIndex] = charge[ii];
      forwardMap[ii] = targetIndex;
    }
  }
  return;
}
// Compaction pass of the cell-list rebuild, forwardMap overload: same
// compaction as the plain rebuildCellList_step2, but additionally records
// forwardMap[oldIndex] = compacted index (MaxIndexValue for slots beyond
// the cell occupancy) so the topology tables can be permuted afterwards.
// Requires dynamic shared memory of blockDim.x * sizeof(IndexType) * 3.
// NOTE(review): ptr_de is accepted but never used in this overload.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2 (IndexType * numAtomInCell,
		       CoordType * coord,
		       CoordNoiType * coordNoi,
		       ScalorType * velox,
		       ScalorType * veloy,
		       ScalorType * veloz,
		       ScalorType * forcx,
		       ScalorType * forcy,
		       ScalorType * forcz,
		       IndexType * globalIndex,
		       TypeType * type,
		       ScalorType * mass,
		       ScalorType * charge,
		       IndexType * forwardMap,
		       mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType ii = tid + bid * blockDim.x;
  // IndexType k = NUintBit - 1;
  IndexType this_numAtomInCell = numAtomInCell[bid];
  // Default forward map: identity for occupied slots, invalid otherwise.
  if (tid < this_numAtomInCell) {
    forwardMap[ii] = ii;
  }
  else {
    forwardMap[ii] = MaxIndexValue;
  }
  __syncthreads();
  // Uniform per block, so every thread of an empty cell returns together.
  if (this_numAtomInCell == 0) return;
  extern __shared__ volatile IndexType sbuff[];
  volatile IndexType * myIndex = (volatile IndexType * )sbuff;
  // Mark invalid slots (migrated away in step1, or beyond occupancy) so
  // headSort pushes them behind the valid entries.
  if (tid >= this_numAtomInCell || globalIndex[ii] == MaxIndexValue){
    myIndex[tid] = MaxIndexValue;
  }
  else {
    myIndex[tid] = tid;
  }
  __syncthreads();
  // headSort returns the number of invalid entries; valid count is the rest.
  IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
  total = blockDim.x - total;
  // Two-phase move: buffer the source atom in registers, barrier, then
  // write -- so in-cell permutations cannot overwrite data still unread.
  IndexType fromId;
  CoordType bk_coord;
  CoordNoiType bk_coordNoi;
  ScalorType bk_velox, bk_veloy, bk_veloz;
  ScalorType bk_forcx, bk_forcy, bk_forcz;
  IndexType bk_globalIndex;
  TypeType bk_type;
  ScalorType bk_mass;
  ScalorType bk_charge;
  __syncthreads();
  if (tid < total){
    fromId = myIndex[tid] + bid * blockDim.x;
    if (ii != fromId){
      bk_coord = coord[fromId];
      bk_coordNoi.x = coordNoi[fromId].x;
      bk_coordNoi.y = coordNoi[fromId].y;
      bk_coordNoi.z = coordNoi[fromId].z;
      bk_velox = velox[fromId];
      bk_veloy = veloy[fromId];
      bk_veloz = veloz[fromId];
      bk_forcx = forcx[fromId];
      bk_forcy = forcy[fromId];
      bk_forcz = forcz[fromId];
      bk_globalIndex = globalIndex[fromId];
      bk_type = type[fromId];
      bk_mass = mass[fromId];
      bk_charge = charge[fromId];
      // Record where the moved atom ends up.
      forwardMap[fromId] = ii;
    }
  }
  __syncthreads();
  if (tid < total && ii != fromId){
    coord[ii] = bk_coord;
    coordNoi[ii].x = bk_coordNoi.x;
    coordNoi[ii].y = bk_coordNoi.y;
    coordNoi[ii].z = bk_coordNoi.z;
    velox[ii] = bk_velox;
    veloy[ii] = bk_veloy;
    veloz[ii] = bk_veloz;
    forcx[ii] = bk_forcx;
    forcy[ii] = bk_forcy;
    forcz[ii] = bk_forcz;
    globalIndex[ii] = bk_globalIndex;
    type[ii] = bk_type;
    mass[ii] = bk_mass;
    charge[ii] = bk_charge;
  }
  if (tid == 0){
    numAtomInCell[bid] = total;
  }
}
// Rebuild bonded topology (bonds / angles / dihedrals) after per-cell atom
// compaction — step 1, variant that also maintains *_Neighbor_localIndex.
//
// forwardMap[oldSlot] gives the compacted slot an atom was moved to, or
// MaxIndexValue for slots that no longer hold a valid atom.  For each
// topology kind the kernel runs two phases:
//   1. remap every stored neighbor *local* index through forwardMap
//      (entries whose neighbor maps to MaxIndexValue keep their old value);
//   2. for atoms that moved (toIndex != fromIndex) copy their topology rows
//      to the new slot and zero the old per-atom count.
//
// Launch layout: one block per cell, threadIdx.x is the slot in the cell
// (blockDim.x is the cell capacity).  Topology arrays are strided: entry k
// of atom a lives at index a + k * bondTopStride.
//
// Review changes: my_numBond / my_numAngle / my_numDihedral are now
// zero-initialized — the copy phase reads them whenever toIndex is valid,
// which should imply tid < this_numAtomInCell, but the zero default avoids
// undefined behavior if that invariant is ever violated.  A dead
// commented-out legacy version was removed from the end of the body.
//
// NOTE(review): in phase 2 each thread reads its source rows while other
// threads concurrently write their destination rows; this is race-free only
// if destination slots never alias still-unread source slots.  The staged,
// barrier-separated step2 kernel below avoids this concern — confirm which
// variant callers rely on.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
                                  const IndexType * bk_numAtomInCell,
                                  IndexType maxNumBond,
                                  IndexType * numBond,
                                  IndexType * bondIndex,
                                  IndexType * bondNeighbor_globalIndex,
                                  IndexType * bondNeighbor_localIndex,
                                  IndexType maxNumAngle,
                                  IndexType * numAngle,
                                  IndexType * angleIndex,
                                  IndexType * anglePosi,
                                  IndexType * angleNeighbor_globalIndex,
                                  IndexType * angleNeighbor_localIndex,
                                  IndexType maxNumDihedral,
                                  IndexType * numDihedral,
                                  IndexType * dihedralIndex,
                                  IndexType * dihedralPosi,
                                  IndexType * dihedralNeighbor_globalIndex,
                                  IndexType * dihedralNeighbor_localIndex,
                                  IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // Pre-compaction atom count of this cell — presumably a backup saved by
  // the caller before compaction; verify against the launch site.
  IndexType this_numAtomInCell = bk_numAtomInCell[bid];
  // Uniform across the block, so returning here is safe with respect to the
  // __syncthreads() calls below.
  if (this_numAtomInCell == 0) return;
  IndexType fromIndex = tid + bid * blockDim.x;
  IndexType toIndex = forwardMap[fromIndex];
  if (maxNumBond != 0){
    IndexType my_numBond = 0;
    if (tid < this_numAtomInCell){
      // Phase 1: translate stored local neighbor indices to compacted slots.
      my_numBond = (numBond[fromIndex]);
      IndexType my_index = fromIndex;
      for (IndexType i = 0; i < my_numBond; ++i){
        IndexType tmp0 = bondNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          bondNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    // All remapping must finish before rows are relocated below.
    __syncthreads();
    if (toIndex != MaxIndexValue && toIndex != fromIndex){
      // Phase 2: move this atom's bond rows to its new slot.
      numBond[toIndex] = my_numBond;
      numBond[fromIndex] = 0;
      IndexType my_fromIndex = fromIndex;
      IndexType my_toIndex = toIndex;
      for (IndexType i = 0; i < my_numBond; ++i){
        bondIndex[my_toIndex] = bondIndex[my_fromIndex];
        bondNeighbor_globalIndex[my_toIndex] =
            bondNeighbor_globalIndex[my_fromIndex];
        bondNeighbor_localIndex [my_toIndex] =
            bondNeighbor_localIndex [my_fromIndex];
        my_fromIndex += bondTopStride;
        my_toIndex += bondTopStride;
      }
    }
  }
  if (maxNumAngle != 0){
    IndexType my_numAngle = 0;
    if (tid < this_numAtomInCell){
      my_numAngle = (numAngle[fromIndex]);
      IndexType my_index = fromIndex;
      // The neighbor arrays hold 2 entries per angle, hence the * 2 bound.
      for (IndexType i = 0; i < my_numAngle * 2; ++i){
        IndexType tmp0 = angleNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          angleNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    __syncthreads();
    if (toIndex != MaxIndexValue && toIndex != fromIndex){
      numAngle[toIndex] = my_numAngle;
      numAngle[fromIndex] = 0;
      IndexType my_fromIndex = fromIndex;
      IndexType my_toIndex = toIndex;
      for (IndexType i = 0; i < my_numAngle; ++i){
        angleIndex[my_toIndex] = angleIndex[my_fromIndex];
        anglePosi [my_toIndex] = anglePosi [my_fromIndex];
        my_fromIndex += bondTopStride;
        my_toIndex += bondTopStride;
      }
      my_fromIndex = fromIndex;
      my_toIndex = toIndex;
      for (IndexType i = 0; i < my_numAngle * 2; ++i){
        angleNeighbor_globalIndex[my_toIndex] =
            angleNeighbor_globalIndex[my_fromIndex];
        angleNeighbor_localIndex [my_toIndex] =
            angleNeighbor_localIndex [my_fromIndex];
        my_fromIndex += bondTopStride;
        my_toIndex += bondTopStride;
      }
    }
  }
  if (maxNumDihedral != 0){
    IndexType my_numDihedral = 0;
    if (tid < this_numAtomInCell){
      my_numDihedral = (numDihedral[fromIndex]);
      IndexType my_index = fromIndex;
      // The neighbor arrays hold 3 entries per dihedral, hence the * 3 bound.
      for (IndexType i = 0; i < my_numDihedral * 3; ++i){
        IndexType tmp0 = dihedralNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          dihedralNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    __syncthreads();
    if (toIndex != MaxIndexValue && toIndex != fromIndex){
      numDihedral[toIndex] = my_numDihedral;
      numDihedral[fromIndex] = 0;
      IndexType my_fromIndex = fromIndex;
      IndexType my_toIndex = toIndex;
      for (IndexType i = 0; i < my_numDihedral; ++i){
        dihedralIndex[my_toIndex] = dihedralIndex[my_fromIndex];
        dihedralPosi [my_toIndex] = dihedralPosi [my_fromIndex];
        my_fromIndex += bondTopStride;
        my_toIndex += bondTopStride;
      }
      my_fromIndex = fromIndex;
      my_toIndex = toIndex;
      for (IndexType i = 0; i < my_numDihedral * 3; ++i){
        dihedralNeighbor_globalIndex[my_toIndex] =
            dihedralNeighbor_globalIndex[my_fromIndex];
        dihedralNeighbor_localIndex [my_toIndex] =
            dihedralNeighbor_localIndex [my_fromIndex];
        my_fromIndex += bondTopStride;
        my_toIndex += bondTopStride;
      }
    }
  }
}
// Rebuild bonded topology after per-cell atom compaction — step 2, variant
// that also maintains *_Neighbor_localIndex.  Produces the same layout as
// the step1 variant above, but the slot-to-slot copy is staged through
// registers with __syncthreads() between every read and write, so within a
// block no thread reads a topology row after another thread has
// overwritten it.
//
// The per-row copy loops run over the full capacity (maxNumBond /
// maxNumAngle / maxNumDihedral) rather than each atom's own count so that
// every thread in the block executes the same number of barriers, as
// __syncthreads() requires.
//
// Review changes: removed inner `if (maxNum* != 0)` checks that exactly
// duplicated the enclosing condition (the barrier sequence is unchanged),
// and zero-initialized my_num* so the docopy branch never reads an
// uninitialized value even if a toIndex-valid slot falls outside
// this_numAtomInCell.  A dead commented-out legacy version was removed
// from the end of the body.
//
// NOTE(review): unlike the step1 variant, the old slot's per-atom count is
// not zeroed after the move — confirm that callers reset stale counts.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
                                  const IndexType * bk_numAtomInCell,
                                  IndexType maxNumBond,
                                  IndexType * numBond,
                                  IndexType * bondIndex,
                                  IndexType * bondNeighbor_globalIndex,
                                  IndexType * bondNeighbor_localIndex,
                                  IndexType maxNumAngle,
                                  IndexType * numAngle,
                                  IndexType * angleIndex,
                                  IndexType * anglePosi,
                                  IndexType * angleNeighbor_globalIndex,
                                  IndexType * angleNeighbor_localIndex,
                                  IndexType maxNumDihedral,
                                  IndexType * numDihedral,
                                  IndexType * dihedralIndex,
                                  IndexType * dihedralPosi,
                                  IndexType * dihedralNeighbor_globalIndex,
                                  IndexType * dihedralNeighbor_localIndex,
                                  IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType this_numAtomInCell = bk_numAtomInCell[bid];
  // Uniform across the block: safe to return before the barriers below.
  if (this_numAtomInCell == 0) return;
  IndexType fromIndex = tid + bid * blockDim.x;
  IndexType toIndex = forwardMap[fromIndex];
  // Only threads whose atom moved to a different, valid slot copy data;
  // all other threads still execute every __syncthreads() below.
  bool docopy = (toIndex != MaxIndexValue && fromIndex != toIndex);
  IndexType bk_num, bk_Index, bk_Posi;
  IndexType bk_Neighbor_globalIndex, bk_Neighbor_localIndex;
  if (maxNumBond != 0){
    IndexType my_numBond = 0;
    if (tid < this_numAtomInCell){
      // Remap stored local neighbor indices to compacted slots first.
      my_numBond = (numBond[fromIndex]);
      IndexType my_index = fromIndex;
      for (IndexType i = 0; i < my_numBond; ++i){
        IndexType tmp0 = bondNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          bondNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    __syncthreads();
    if (docopy){
      bk_num = my_numBond;
    }
    __syncthreads();
    if (docopy){
      numBond[toIndex] = bk_num;
    }
    __syncthreads();
    // my_toIndex holds MaxIndexValue-derived garbage for non-copying
    // threads, but they only advance it — they never dereference it.
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumBond; ++kk){
      // Stage: every copying thread snapshots its source row ...
      if (docopy){
        bk_Index = bondIndex[my_fromIndex];
        bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
        bk_Neighbor_localIndex = bondNeighbor_localIndex [my_fromIndex];
      }
      __syncthreads();
      // ... then, after the barrier, writes its destination row.
      if (docopy){
        bondIndex[my_toIndex] = bk_Index;
        bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
        bondNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
  if (maxNumAngle != 0){
    IndexType my_numAngle = 0;
    if (tid < this_numAtomInCell){
      my_numAngle = (numAngle[fromIndex]);
      IndexType my_index = fromIndex;
      // 2 neighbor entries per angle, hence the * 2 bound.
      for (IndexType i = 0; i < my_numAngle * 2; ++i){
        IndexType tmp0 = angleNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          angleNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    __syncthreads();
    if (docopy){
      bk_num = my_numAngle;
    }
    __syncthreads();
    if (docopy){
      numAngle[toIndex] = bk_num;
    }
    __syncthreads();
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumAngle; ++kk){
      if (docopy){
        bk_Index = angleIndex[my_fromIndex];
        bk_Posi = anglePosi [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        angleIndex[my_toIndex] = bk_Index;
        anglePosi [my_toIndex] = bk_Posi ;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
    my_fromIndex = fromIndex;
    my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
      if (docopy){
        bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
        bk_Neighbor_localIndex = angleNeighbor_localIndex [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
        angleNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
  if (maxNumDihedral != 0){
    IndexType my_numDihedral = 0;
    if (tid < this_numAtomInCell){
      my_numDihedral = (numDihedral[fromIndex]);
      IndexType my_index = fromIndex;
      // 3 neighbor entries per dihedral, hence the * 3 bound.
      for (IndexType i = 0; i < my_numDihedral * 3; ++i){
        IndexType tmp0 = dihedralNeighbor_localIndex[my_index];
        IndexType tmp1 = forwardMap[tmp0];
        if (tmp1 != MaxIndexValue){
          dihedralNeighbor_localIndex[my_index] = tmp1;
        }
        my_index += bondTopStride;
      }
    }
    __syncthreads();
    if (docopy){
      bk_num = my_numDihedral;
    }
    __syncthreads();
    if (docopy){
      numDihedral[toIndex] = bk_num;
    }
    __syncthreads();
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
      if (docopy){
        bk_Index = dihedralIndex[my_fromIndex];
        bk_Posi = dihedralPosi [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        dihedralIndex[my_toIndex] = bk_Index;
        dihedralPosi [my_toIndex] = bk_Posi ;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
    my_fromIndex = fromIndex;
    my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
      if (docopy){
        bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
        bk_Neighbor_localIndex = dihedralNeighbor_localIndex [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
        dihedralNeighbor_localIndex [my_toIndex] = bk_Neighbor_localIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
}
// Step 1 of the bonded-topology rebuild after per-cell atom compaction
// (overload without *_Neighbor_localIndex arrays).  Each thread owns one
// slot of one cell; if forwardMap sends its atom to a different, valid
// slot, the thread copies the atom's bond/angle/dihedral rows there and
// zeroes the old per-atom count.  Topology arrays are strided: entry k of
// atom a lives at index a + k * bondTopStride.  This kernel contains no
// __syncthreads(), so per-thread early returns are safe.
//
// NOTE(review): destination rows are written while other threads may still
// be reading their own source rows; race-free only if forwardMap guarantees
// destinations never alias unread sources (the staged step2 overload below
// avoids the issue) — confirm which variant callers rely on.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
                                  const IndexType * bk_numAtomInCell,
                                  IndexType maxNumBond,
                                  IndexType * numBond,
                                  IndexType * bondIndex,
                                  IndexType * bondNeighbor_globalIndex,
                                  IndexType maxNumAngle,
                                  IndexType * numAngle,
                                  IndexType * angleIndex,
                                  IndexType * anglePosi,
                                  IndexType * angleNeighbor_globalIndex,
                                  IndexType maxNumDihedral,
                                  IndexType * numDihedral,
                                  IndexType * dihedralIndex,
                                  IndexType * dihedralPosi,
                                  IndexType * dihedralNeighbor_globalIndex,
                                  IndexType bondTopStride)
{
  const IndexType cellId = blockIdx.x + gridDim.x * blockIdx.y;
  // Empty cell: the whole block exits together (no barriers in this kernel).
  if (bk_numAtomInCell[cellId] == 0) return;
  const IndexType fromIndex = threadIdx.x + cellId * blockDim.x;
  const IndexType toIndex = forwardMap[fromIndex];
  // Threads whose slot holds no atom, or whose atom stayed put, have
  // nothing to do — bail out once instead of re-testing per section.
  if (toIndex == MaxIndexValue || toIndex == fromIndex) return;

  if (maxNumBond != 0){
    const IndexType nBond = (numBond[toIndex] = numBond[fromIndex]);
    numBond[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType i = 0; i < nBond; ++i){
      bondIndex[dst] = bondIndex[src];
      bondNeighbor_globalIndex[dst] = bondNeighbor_globalIndex[src];
      src += bondTopStride;
      dst += bondTopStride;
    }
  }
  if (maxNumAngle != 0){
    const IndexType nAngle = (numAngle[toIndex] = numAngle[fromIndex]);
    numAngle[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType i = 0; i < nAngle; ++i){
      angleIndex[dst] = angleIndex[src];
      anglePosi [dst] = anglePosi [src];
      src += bondTopStride;
      dst += bondTopStride;
    }
    src = fromIndex;
    dst = toIndex;
    // The neighbor array holds 2 entries per angle.
    for (IndexType i = 0; i < nAngle * 2; ++i){
      angleNeighbor_globalIndex[dst] = angleNeighbor_globalIndex[src];
      src += bondTopStride;
      dst += bondTopStride;
    }
  }
  if (maxNumDihedral != 0){
    const IndexType nDihedral = (numDihedral[toIndex] = numDihedral[fromIndex]);
    numDihedral[fromIndex] = 0;
    IndexType src = fromIndex;
    IndexType dst = toIndex;
    for (IndexType i = 0; i < nDihedral; ++i){
      dihedralIndex[dst] = dihedralIndex[src];
      dihedralPosi [dst] = dihedralPosi [src];
      src += bondTopStride;
      dst += bondTopStride;
    }
    src = fromIndex;
    dst = toIndex;
    // The neighbor array holds 3 entries per dihedral.
    for (IndexType i = 0; i < nDihedral * 3; ++i){
      dihedralNeighbor_globalIndex[dst] = dihedralNeighbor_globalIndex[src];
      src += bondTopStride;
      dst += bondTopStride;
    }
  }
}
// Step 2 of the bonded-topology rebuild (overload without
// *_Neighbor_localIndex arrays).  Same end result as the step1 overload
// above — topology rows of moved atoms land in their compacted slots — but
// the copy is staged through registers with __syncthreads() between every
// read and write, so within a block no thread reads a row after another
// thread has overwritten it.  Launch layout: one block per cell,
// threadIdx.x is the slot in the cell; entry k of atom a lives at index
// a + k * bondTopStride.
//
// NOTE(review): unlike the step1 overload, the old slot's per-atom count
// is not zeroed after the move — confirm that callers reset stale counts.
__global__ void Parallel::CudaGlobal::
rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
                                  const IndexType * bk_numAtomInCell,
                                  IndexType maxNumBond,
                                  IndexType * numBond,
                                  IndexType * bondIndex,
                                  IndexType * bondNeighbor_globalIndex,
                                  IndexType maxNumAngle,
                                  IndexType * numAngle,
                                  IndexType * angleIndex,
                                  IndexType * anglePosi,
                                  IndexType * angleNeighbor_globalIndex,
                                  IndexType maxNumDihedral,
                                  IndexType * numDihedral,
                                  IndexType * dihedralIndex,
                                  IndexType * dihedralPosi,
                                  IndexType * dihedralNeighbor_globalIndex,
                                  IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType this_numAtomInCell = bk_numAtomInCell[bid];
  // Uniform across the block, so returning before the barriers is safe.
  if (this_numAtomInCell == 0) return;
  IndexType fromIndex = tid + bid * blockDim.x;
  IndexType toIndex = forwardMap[fromIndex];
  // Only threads whose atom moved to a different, valid slot copy data;
  // all other threads still execute every __syncthreads() below.
  bool docopy = (toIndex != MaxIndexValue && fromIndex != toIndex);
  IndexType bk_num, bk_Index, bk_Posi;
  IndexType bk_Neighbor_globalIndex;
  if (maxNumBond != 0){
    if (docopy){
      bk_num = (numBond[fromIndex]);
    }
    __syncthreads();
    if (docopy){
      numBond[toIndex] = bk_num;
    }
    __syncthreads();
    // my_toIndex is meaningless for non-copying threads, but they only
    // advance it — they never dereference it.
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    // Loop bound is the full capacity, not this atom's count, so every
    // thread in the block hits the same number of barriers.
    for (IndexType kk = 0; kk < maxNumBond; ++kk){
      // Stage: snapshot the source row into registers ...
      if (docopy){
        bk_Index = bondIndex[my_fromIndex];
        bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
      }
      __syncthreads();
      // ... then, after the barrier, write the destination row.
      if (docopy){
        bondIndex[my_toIndex] = bk_Index;
        bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
  if (maxNumAngle != 0){
    if (docopy){
      bk_num = (numAngle[fromIndex]);
    }
    __syncthreads();
    if (docopy){
      numAngle[toIndex] = bk_num;
    }
    __syncthreads();
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumAngle; ++kk){
      if (docopy){
        bk_Index = angleIndex[my_fromIndex];
        bk_Posi = anglePosi [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        angleIndex[my_toIndex] = bk_Index;
        anglePosi [my_toIndex] = bk_Posi ;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
    my_fromIndex = fromIndex;
    my_toIndex = toIndex;
    // 2 neighbor entries per angle, hence the * 2 bound.
    for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
      if (docopy){
        bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
  if (maxNumDihedral != 0){
    if (docopy){
      bk_num = (numDihedral[fromIndex]);
    }
    __syncthreads();
    if (docopy){
      numDihedral[toIndex] = bk_num;
    }
    __syncthreads();
    IndexType my_fromIndex = fromIndex;
    IndexType my_toIndex = toIndex;
    for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
      if (docopy){
        bk_Index = dihedralIndex[my_fromIndex];
        bk_Posi = dihedralPosi [my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        dihedralIndex[my_toIndex] = bk_Index;
        dihedralPosi [my_toIndex] = bk_Posi ;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
    my_fromIndex = fromIndex;
    my_toIndex = toIndex;
    // 3 neighbor entries per dihedral, hence the * 3 bound.
    for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
      if (docopy){
        bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
      }
      __syncthreads();
      if (docopy){
        dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
      }
      __syncthreads();
      my_fromIndex += bondTopStride;
      my_toIndex += bondTopStride;
    }
  }
}
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step1_mapBondTop (const IndexType * forwardMap,
// IndexType maxNumBond,
// IndexType * numBond,
// IndexType * bondIndex,
// IndexType * bondNeighbor_globalIndex,
// IndexType maxNumAngle,
// IndexType * numAngle,
// IndexType * angleIndex,
// IndexType * anglePosi,
// IndexType * angleNeighbor_globalIndex,
// IndexType maxNumDihedral,
// IndexType * numDihedral,
// IndexType * dihedralIndex,
// IndexType * dihedralPosi,
// IndexType * dihedralNeighbor_globalIndex,
// IndexType bondTopStride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType fromIndex = tid + bid * blockDim.x;
// IndexType toIndex = forwardMap[fromIndex];
// if (toIndex != MaxIndexValue){
// if (maxNumBond != 0){
// IndexType my_numBond = (numBond[toIndex] = numBond[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numBond; ++i){
// bondIndex[my_toIndex] = bondIndex[my_fromIndex];
// bondNeighbor_globalIndex[my_toIndex] =
// bondNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumAngle != 0){
// IndexType my_numAngle = (numAngle[toIndex] = numAngle[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numAngle; ++i){
// angleIndex[my_toIndex] = angleIndex[my_fromIndex];
// anglePosi [my_toIndex] = anglePosi [my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numAngle * 2; ++i){
// angleNeighbor_globalIndex[my_toIndex] =
// angleNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumDihedral != 0){
// IndexType my_numDihedral = (numDihedral[toIndex] = numDihedral[fromIndex]);
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numDihedral; ++i){
// dihedralIndex[my_toIndex] = dihedralIndex[my_fromIndex];
// dihedralPosi [my_toIndex] = dihedralPosi [my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType i = 0; i < my_numDihedral * 3; ++i){
// dihedralNeighbor_globalIndex[my_toIndex] =
// dihedralNeighbor_globalIndex[my_fromIndex];
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// }
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step2_mapBondTop (const IndexType * forwardMap,
// IndexType maxNumBond,
// IndexType * numBond,
// IndexType * bondIndex,
// IndexType * bondNeighbor_globalIndex,
// IndexType maxNumAngle,
// IndexType * numAngle,
// IndexType * angleIndex,
// IndexType * anglePosi,
// IndexType * angleNeighbor_globalIndex,
// IndexType maxNumDihedral,
// IndexType * numDihedral,
// IndexType * dihedralIndex,
// IndexType * dihedralPosi,
// IndexType * dihedralNeighbor_globalIndex,
// IndexType bondTopStride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType fromIndex = tid + bid * blockDim.x;
// IndexType toIndex = forwardMap[fromIndex];
// bool docopy = (toIndex != MaxIndexValue);
// IndexType bk_num, bk_Index, bk_Posi;
// IndexType bk_Neighbor_globalIndex;
// if (maxNumBond != 0){
// if (docopy){
// bk_num = numBond[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numBond[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumBond; ++kk){
// if (docopy){
// bk_Index = bondIndex[my_fromIndex];
// bk_Neighbor_globalIndex = bondNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// bondIndex[my_toIndex] = bk_Index;
// bondNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumAngle != 0){
// if (docopy){
// bk_num = numAngle[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numAngle[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumAngle; ++kk){
// if (docopy){
// bk_Index = angleIndex[my_fromIndex];
// bk_Posi = anglePosi [my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// angleIndex[my_toIndex] = bk_Index;
// anglePosi [my_toIndex] = bk_Posi ;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumAngle * 2; ++kk){
// if (docopy){
// bk_Neighbor_globalIndex = angleNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// angleNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// if (maxNumDihedral != 0){
// if (docopy){
// bk_num = numDihedral[fromIndex];
// }
// __syncthreads();
// if (docopy){
// numDihedral[toIndex] = bk_num;
// }
// __syncthreads();
// IndexType my_fromIndex = fromIndex;
// IndexType my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumDihedral; ++kk){
// if (docopy){
// bk_Index = dihedralIndex[my_fromIndex];
// bk_Posi = dihedralPosi [my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// dihedralIndex[my_toIndex] = bk_Index;
// dihedralPosi [my_toIndex] = bk_Posi ;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// my_fromIndex = fromIndex;
// my_toIndex = toIndex;
// for (IndexType kk = 0; kk < maxNumDihedral * 3; ++kk){
// if (docopy){
// bk_Neighbor_globalIndex = dihedralNeighbor_globalIndex[my_fromIndex];
// }
// __syncthreads();
// if (docopy){
// dihedralNeighbor_globalIndex[my_toIndex] = bk_Neighbor_globalIndex;
// }
// __syncthreads();
// my_fromIndex += bondTopStride;
// my_toIndex += bondTopStride;
// }
// }
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step1 (const VectorType frameLow,
// const VectorType frameUp,
// const IntVectorType numCell,
// const IndexType * bk_numAtomInCell,
// IndexType * numAtomInCell,
// CoordType * coord,
// CoordNoiType * coordNoi,
// ScalorType * velox,
// ScalorType * veloy,
// ScalorType * veloz,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// IndexType * globalIndex,
// TypeType * type,
// ScalorType * mass,
// ScalorType * charge,
// mdError_t * ptr_de)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
// __shared__ ScalorType dcellxi;
// __shared__ ScalorType dcellyi;
// __shared__ ScalorType dcellzi;
// if (tid == 0) {
// dcellxi = ScalorType(numCell.x) / (frameUp.x - frameLow.x);
// dcellyi = ScalorType(numCell.y) / (frameUp.y - frameLow.y);
// dcellzi = ScalorType(numCell.z) / (frameUp.z - frameLow.z);
// }
// __syncthreads();
// bool inRange = tid < bk_numAtomInCell[bid];
// if (inRange){
// IndexType targetCellx, targetCelly, targetCellz;
// targetCellx = IndexType((coord[ii].x - frameLow.x) * dcellxi);
// targetCelly = IndexType((coord[ii].y - frameLow.y) * dcellyi);
// targetCellz = IndexType((coord[ii].z - frameLow.z) * dcellzi);
// if (ptr_de != NULL &&
// (targetCellx >= numCell.x ||
// targetCelly >= numCell.y ||
// targetCellz >= numCell.z)){
// *ptr_de = mdErrorOverFlowCellIdx;
// return;
// }
// IndexType cellid = CudaDevice::D3toD1
// (numCell, targetCellx, targetCelly, targetCellz);
// if (cellid != bid){
// IndexType pid = atomicAdd (&numAtomInCell[cellid], 1);
// if (pid >= blockDim.x){
// *ptr_de = mdErrorShortCellList;
// pid = 0;
// }
// IndexType targetIndex = pid + cellid * blockDim.x;
// coord[targetIndex] = coord[ii];
// coordNoi[targetIndex].x = coordNoi[ii].x;
// coordNoi[targetIndex].y = coordNoi[ii].y;
// coordNoi[targetIndex].z = coordNoi[ii].z;
// velox[targetIndex] = velox[ii];
// veloy[targetIndex] = veloy[ii];
// veloz[targetIndex] = veloz[ii];
// forcx[targetIndex] = forcx[ii];
// forcy[targetIndex] = forcy[ii];
// forcz[targetIndex] = forcz[ii];
// globalIndex[targetIndex] = globalIndex[ii];
// globalIndex[ii] = MaxIndexValue;
// type[targetIndex] = type[ii];
// mass[targetIndex] = mass[ii];
// charge[targetIndex] = charge[ii];
// IndexType my_numBond = (numBond[targetIndex] = numBond[ii]);
// for (IndexType kk = 0; kk < my_numBond; ++kk){
// IndexType fromListIndex = Parallel::DeviceBondList_cudaDevice::indexConvert
// (bondTopStride, ii, kk);
// IndexType toListIndex = Parallel::DeviceBondList_cudaDevice::indexConvert
// (bondTopStride, targetIndex, kk);
// bondNeighbor_globalIndex[toListIndex] = bondNeighbor_globalIndex[fromListIndex];
// bondIndex[toListIndex] = bondIndex[fromListIndex];
// bondNeighbor_localIndex[toListIndex] = bondNeighbor_localIndex[fromListIndex];
// }
// }
// }
// return;
// }
// __global__ void Parallel::CudaGlobal::
// rebuildCellList_step2 (IndexType * numAtomInCell,
// CoordType * coord,
// CoordNoiType * coordNoi,
// ScalorType * velox,
// ScalorType * veloy,
// ScalorType * veloz,
// ScalorType * forcx,
// ScalorType * forcy,
// ScalorType * forcz,
// IndexType * globalIndex,
// TypeType * type,
// ScalorType * mass,
// ScalorType * charge,
// IndexType * numBond,
// IndexType * bondNeighbor_globalIndex,
// IndexType * bondIndex,
// IndexType * bondNeighbor_localIndex,
// IndexType bondTopStride,
// IndexType maxNumBond,
// mdError_t * ptr_de)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// IndexType ii = tid + bid * blockDim.x;
// // IndexType k = NUintBit - 1;
// extern __shared__ volatile IndexType sbuff[];
// volatile IndexType * myIndex = (volatile IndexType * )sbuff;
// if (tid >= numAtomInCell[bid] || globalIndex[ii] == MaxIndexValue){
// myIndex[tid] = MaxIndexValue;
// }
// else {
// myIndex[tid] = tid;
// }
// __syncthreads();
// IndexType total = headSort (myIndex, &sbuff[blockDim.x]);
// total = blockDim.x - total;
// IndexType fromId;
// CoordType bk_coord;
// CoordNoiType bk_coordNoi;
// ScalorType bk_velox, bk_veloy, bk_veloz;
// ScalorType bk_forcx, bk_forcy, bk_forcz;
// IndexType bk_globalIndex;
// TypeType bk_type;
// ScalorType bk_mass;
// ScalorType bk_charge;
// IndexType bk_numBond;
// if (tid < total){
// fromId = myIndex[tid] + bid * blockDim.x;
// if (ii != fromId){
// bk_coord = coord[fromId];
// bk_coordNoi.x = coordNoi[fromId].x;
// bk_coordNoi.y = coordNoi[fromId].y;
// bk_coordNoi.z = coordNoi[fromId].z;
// bk_velox = velox[fromId];
// bk_veloy = veloy[fromId];
// bk_veloz = veloz[fromId];
// bk_forcx = forcx[fromId];
// bk_forcy = forcy[fromId];
// bk_forcz = forcz[fromId];
// bk_globalIndex = globalIndex[fromId];
// bk_type = type[fromId];
// bk_mass = mass[fromId];
// bk_charge = charge[fromId];
// bk_numBond = numBond[fromId];
// }
// }
// __syncthreads();
// bool docopy = (tid < total && ii != fromId);
// if (docopy){
// coord[ii] = bk_coord;
// coordNoi[ii].x = bk_coordNoi.x;
// coordNoi[ii].y = bk_coordNoi.y;
// coordNoi[ii].z = bk_coordNoi.z;
// velox[ii] = bk_velox;
// veloy[ii] = bk_veloy;
// veloz[ii] = bk_veloz;
// forcx[ii] = bk_forcx;
// forcy[ii] = bk_forcy;
// forcz[ii] = bk_forcz;
// globalIndex[ii] = bk_globalIndex;
// type[ii] = bk_type;
// mass[ii] = bk_mass;
// charge[ii] = bk_charge;
// numBond[ii] = bk_numBond;
// }
// IndexType bk_bondNeighbor_globalIndex;
// IndexType bk_bondIndex;
// IndexType bk_bondNeighbor_localIndex;
// for (IndexType kk = 0; kk < maxNumBond; ++kk){
// __syncthreads();
// if (docopy){
// bk_bondNeighbor_globalIndex = bondNeighbor_globalIndex[fromId];
// bk_bondIndex = bondIndex[fromId];
// bk_bondNeighbor_localIndex = bondNeighbor_localIndex[fromId];
// }
// __syncthreads();
// if (docopy){
// bondNeighbor_globalIndex [ii] = bk_bondNeighbor_globalIndex;
// bondIndex[ii] = bk_bondIndex;
// bondNeighbor_localIndex[ii] = bk_bondNeighbor_localIndex;
// }
// }
// if (tid == 0){
// numAtomInCell[bid] = total;
// }
// }
void Parallel::DeviceCellListedMDData::
easyMallocCell (const IndexType & totalNumCell)
{
  // Allocate the device-side per-cell bookkeeping arrays for totalNumCell
  // cells.  Any previous allocation is released first; a request for zero
  // cells is a no-op.
  if (totalNumCell == 0) return;
  clearCell ();
  const size_t cellArrayBytes = sizeof(IndexType) * totalNumCell;
  const size_t mapArrayBytes  =
      sizeof(IndexType) * totalNumCell * Parallel::Interface::numThreadsInCell();
  cudaMalloc ((void**)&numAtomInCell,    cellArrayBytes);
  cudaMalloc ((void**)&forwardMap_step1, mapArrayBytes);
  cudaMalloc ((void**)&forwardMap_step2, mapArrayBytes);
  memSize = totalNumCell;
  checkCUDAError ("malloc Cell");
  malloced = true;
}
void Parallel::DeviceCellListedMDData::
clearCell()
{
  // Release the device-side cell arrays; safe to call when nothing was
  // allocated.
  if (! malloced) return;
  cudaFree (numAtomInCell);
  cudaFree (forwardMap_step2);
  cudaFree (forwardMap_step1);
  memSize = 0;
  malloced = false;
}
Parallel::DeviceCellListedMDData::
DeviceCellListedMDData ()
{
  // Start out empty: no cell-list geometry and no device memory owned.
  malloced = false;
  memSize = 0;
  rlist = 0;
  devideLevel = 0;
  numCell.x = 0;
  numCell.y = 0;
  numCell.z = 0;
  frameLow.x = frameLow.y = frameLow.z = 0;
  frameUp.x  = frameUp.y  = frameUp.z  = 0;
}
// Destructor: release any device-side cell arrays still owned.
Parallel::DeviceCellListedMDData::
~DeviceCellListedMDData()
{
  clearCell();
}
// Default-construct an empty list of cell indexes.
Parallel::SubCellList::
SubCellList ()
{
}
// Sort the stored cell indexes.  Sortedness is the "built" invariant that
// the set operations (add / sub) below rely on.
void Parallel::SubCellList::
build ()
{
  Parallel::Interface::sort (this->begin(), this->end());
}
bool Parallel::SubCellList::
isBuilt ()
{
  // The list counts as built exactly when its indexes are in sorted order.
  return Parallel::Interface::is_sorted (this->begin(), this->end());
}
// Merge the cell indexes of a into this list, producing a sorted,
// duplicate-free result (set union).
void Parallel::SubCellList::
add (const SubCellList & a)
{
  for (std::vector<IndexType>::const_iterator it = a.begin();
       it != a.end(); ++it){
    push_back (*it);
  }
  // bug fix: the original called unique() BEFORE sort() and discarded its
  // return value.  unique only removes *adjacent* duplicates, so duplicated
  // indexes survived.  Sort first, then deduplicate, then erase the tail.
  Parallel::Interface::sort (this->begin(), this->end());
  std::vector<IndexType>::iterator newEnd =
      Parallel::Interface::unique (this->begin(), this->end());
  this->erase (newEnd, this->end());
}
void Parallel::SubCellList::
sub (const SubCellList & a)
{
  // Remove from this list every cell index that also appears in a
  // (set difference).  Both lists are assumed sorted (see build()).
  std::vector<IndexType > diff (this->size());
  std::vector<IndexType >::iterator diffEnd =
      Parallel::Interface::set_difference (this->begin(), this->end(),
                                           a.begin(), a.end(),
                                           diff.begin());
  std::vector<IndexType >::iterator keepEnd =
      Parallel::Interface::copy (diff.begin(), diffEnd, this->begin());
  this->erase (keepEnd, this->end());
}
void Parallel::DeviceCellListedMDData::
buildSubList (const IndexType & xIdLo,
              const IndexType & xIdUp,
              const IndexType & yIdLo,
              const IndexType & yIdUp,
              const IndexType & zIdLo,
              const IndexType & zIdUp,
              SubCellList & subList) const
{
  // Collect the 1D indexes of all cells in the half-open box
  // [xIdLo,xIdUp) x [yIdLo,yIdUp) x [zIdLo,zIdUp) into subList.
  // Upper bounds must not exceed the cell grid dimensions.
  if (xIdUp > numCell.x){
    throw MDExcptCellList ("x up index exceeds number of cells on x");
  }
  if (yIdUp > numCell.y){
    throw MDExcptCellList ("y up index exceeds number of cells on y");
  }
  if (zIdUp > numCell.z){
    throw MDExcptCellList ("z up index exceeds number of cells on z");
  }
  subList.clear();
  for (IndexType ix = xIdLo; ix < xIdUp; ++ix){
    for (IndexType iy = yIdLo; iy < yIdUp; ++iy){
      for (IndexType iz = zIdLo; iz < zIdUp; ++iz){
        subList.push_back (D3toD1 (ix, iy, iz));
      }
    }
  }
}
// Construct an empty transfer package: no cells, no buffers allocated,
// and a mask that selects all data items by default.
Parallel::DeviceTransferPackage::
DeviceTransferPackage ()
    : numCell (0), memSize(0), hcellIndex(NULL), hcellStartIndex(NULL),
      myMask (MDDataItemMask_All)
{
}
void Parallel::DeviceTransferPackage::
clearMe ()
{
  // Free the device buffers and their host mirrors; a package that owns
  // nothing (memSize == 0) is left untouched.
  if (memSize == 0) return;
  cudaFree (cellIndex);
  cudaFree (cellStartIndex);
  freeAPointer ((void**)&hcellIndex);
  freeAPointer ((void**)&hcellStartIndex);
  memSize = 0;
  numCell = 0;
}
// Destructor: release all buffers owned by the package.
Parallel::DeviceTransferPackage::
~DeviceTransferPackage ()
{
  clearMe();
}
void Parallel::DeviceTransferPackage::
easyMallocMe (IndexType memSize_)
{
  // (Re)allocate the device index buffers and their host mirrors for
  // memSize_ cells.  cellStartIndex carries one extra slot because it stores
  // prefix-sum style start offsets.  Zero-sized requests are a no-op.
  if (memSize_ == 0) return;
  clearMe ();
  memSize = memSize_;
  size_t indexBytes = memSize * sizeof(IndexType);
  size_t startBytes = (memSize + 1) * sizeof(IndexType);
  cudaMalloc ((void**)&cellIndex, indexBytes);
  cudaMalloc ((void**)&cellStartIndex, startBytes);
  checkCUDAError ("DeviceTransferPackage::mallocMe failed malloc");
  hcellIndex = (IndexType *) malloc (indexBytes);
  if (hcellIndex == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
                                     "hcellIndex", indexBytes);
  }
  hcellStartIndex = (IndexType *) malloc (startBytes);
  if (hcellStartIndex == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
                                     "hcellStartIndex", startBytes);
  }
}
// Prepare the package for the cells listed in subCellList: grow the buffers
// if needed, record the cell indexes on the host and mirror them to the
// device.
void Parallel::DeviceTransferPackage::
reinit (const SubCellList & subCellList)
{
  if (memSize < subCellList.size()){
    easyMallocMe (subCellList.size()*MemAllocExtension);
  }
  numCell = subCellList.size();
  for (IndexType i = 0; i < numCell; ++i){
    hcellIndex[i] = subCellList[i];
  }
  // bug fix: copy exactly the numCell entries written above.  The original
  // copied memSize entries, reading the uninitialized tail of hcellIndex
  // whenever the allocation was larger than the current cell count.
  size_t size = numCell * sizeof(IndexType);
  cudaMemcpy (cellIndex, hcellIndex, size, cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceTransferPackage::reinit memcpy");
}
// Gather the atoms of the previously registered cells (see reinit()) out of
// the cell-organized ddata into this package's densely packed storage.
// Per-cell start offsets are computed on the host as a prefix sum of the
// cell populations, mirrored to the device, and then two kernels copy the
// plain per-atom data and the bonded topology respectively.
void Parallel::DeviceTransferPackage::
pack (const DeviceCellListedMDData & ddata,
      const MDDataItemMask_t mask)
{
  if (numCell == 0) return;
  myMask = mask;
  IndexType totalNumCell = ddata.numCell.x * ddata.numCell.y * ddata.numCell.z;
  // bring the per-cell atom counts to the host to build the prefix sum
  IndexType * numAtomInCell ;
  size_t size = totalNumCell * sizeof(IndexType);
  numAtomInCell = (IndexType *) malloc (size);
  if (numAtomInCell == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceTransferPackage::reinit",
                                     "numAtomInCell", size);
  }
  cudaMemcpy (numAtomInCell, ddata.numAtomInCell, size,
              cudaMemcpyDeviceToHost);
  checkCUDAError ("DeviceTransferPackage::pack cpy numAtomInCell to host");
  // hcellStartIndex[i] becomes the first packed slot of the i-th cell;
  // hcellStartIndex[numCell] is the total number of atoms to pack
  hcellStartIndex[0] = 0;
  for (IndexType i = 1; i < numCell+1; ++i){
    hcellStartIndex[i] = hcellStartIndex[i-1] + numAtomInCell[hcellIndex[i-1]];
  }
  IndexType & expectedNumData (hcellStartIndex[numCell]);
  cudaMemcpy (cellStartIndex, hcellStartIndex, (numCell+1) * sizeof(IndexType),
              cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceTransferPackage::pack cpy cellStartIndex to device");
  free (numAtomInCell);
  // grow this package's storage when the packed atom count or the bonded
  // topology capacities exceed the current allocation
  IndexType expectedNumBond(0), expectedNumAngle(0), expectedNumDihedral(0);
  bool copyBond = (mask & MDDataItemMask_Bond);
  bool copyAngle = (mask & MDDataItemMask_Angle);
  bool copyDihedral = (mask & MDDataItemMask_Dihedral);
  if (copyBond) expectedNumBond = ddata.getMaxNumBond();
  if (copyAngle) expectedNumAngle = ddata.getMaxNumAngle();
  if (copyDihedral) expectedNumDihedral = ddata.getMaxNumDihedral();
  DeviceMDData::setGlobalBox (ddata.getGlobalBox());
  if (expectedNumData > DeviceMDData::memSize() ){
    // printf ("# DeviceTransferPackage::pack, realloc\n");
    DeviceMDData::easyMalloc(
        expectedNumData * MemAllocExtension,
        expectedNumBond, expectedNumAngle, expectedNumDihedral);
  }
  else if ((copyBond && (getMaxNumBond() != ddata.getMaxNumBond())) ||
           (copyAngle && (getMaxNumAngle() != ddata.getMaxNumAngle())) ||
           (copyDihedral && (getMaxNumDihedral() != ddata.getMaxNumDihedral())) ){
    // printf ("# DeviceTransferPackage::pack, realloc\n");
    DeviceMDData::easyMalloc(
        DeviceMDData::memSize(),
        expectedNumBond, expectedNumAngle, expectedNumDihedral);
  }
  DeviceMDData::numData() = expectedNumData;
  checkCUDAError ("DeviceTransferPackage::pack, packDeviceMDData, before");
  // one block per packed cell; blockDim = threads-per-cell
  Parallel::CudaGlobal::packDeviceMDData
      <<<numCell, Parallel::Interface::numThreadsInCell()>>> (
          cellIndex,
          ddata.dptr_numAtomInCell(),
          cellStartIndex,
          mask,
          ddata.dptr_coordinate(),
          ddata.dptr_coordinateNoi(),
          ddata.dptr_velocityX(),
          ddata.dptr_velocityY(),
          ddata.dptr_velocityZ(),
          ddata.dptr_forceX(),
          ddata.dptr_forceY(),
          ddata.dptr_forceZ(),
          ddata.dptr_globalIndex(),
          ddata.dptr_type(),
          ddata.dptr_mass(),
          ddata.dptr_charge(),
          this->dptr_coordinate(),
          this->dptr_coordinateNoi(),
          this->dptr_velocityX(),
          this->dptr_velocityY(),
          this->dptr_velocityZ(),
          this->dptr_forceX(),
          this->dptr_forceY(),
          this->dptr_forceZ(),
          this->dptr_globalIndex(),
          this->dptr_type(),
          this->dptr_mass(),
          this->dptr_charge());
  // second kernel packs the bond/angle/dihedral tables
  Parallel::CudaGlobal::packDeviceMDData_bondTop
      <<<numCell, Parallel::Interface::numThreadsInCell()>>> (
          cellIndex,
          ddata.dptr_numAtomInCell(),
          cellStartIndex,
          mask,
          ddata.dptr_numBond(),
          ddata.dptr_bondIndex(),
          ddata.dptr_bondNeighbor_globalIndex(),
          ddata.dptr_numAngle(),
          ddata.dptr_angleIndex(),
          ddata.dptr_anglePosi(),
          ddata.dptr_angleNeighbor_globalIndex(),
          ddata.dptr_numDihedral(),
          ddata.dptr_dihedralIndex(),
          ddata.dptr_dihedralPosi(),
          ddata.dptr_dihedralNeighbor_globalIndex(),
          ddata.bondTopStride(),
          ddata.getMaxNumBond(),
          ddata.getMaxNumAngle(),
          ddata.getMaxNumDihedral(),
          this->dptr_numBond(),
          this->dptr_bondIndex(),
          this->dptr_bondNeighbor_globalIndex(),
          this->dptr_numAngle(),
          this->dptr_angleIndex(),
          this->dptr_anglePosi(),
          this->dptr_angleNeighbor_globalIndex(),
          this->dptr_numDihedral(),
          this->dptr_dihedralIndex(),
          this->dptr_dihedralPosi(),
          this->dptr_dihedralNeighbor_globalIndex(),
          this->bondTopStride()
          );
  checkCUDAError ("DeviceTransferPackage::pack, packDeviceMDData");
}
// Pack one cell per block: thread tid copies atom tid of cell cellIndex[bid]
// from the cell-organized source arrays into the compact destination arrays,
// starting at cellStartIndex[bid].  Only the items selected by mask move.
// Launch: grid = number of packed cells, blockDim.x = threads-per-cell.
__global__ void Parallel::CudaGlobal::
packDeviceMDData (const IndexType * cellIndex,
                  const IndexType * numAtomInCell,
                  const IndexType * cellStartIndex,
                  const MDDataItemMask_t mask,
                  const CoordType * source_coord,
                  const CoordNoiType * source_coordNoi,
                  const ScalorType * source_velox,
                  const ScalorType * source_veloy,
                  const ScalorType * source_veloz,
                  const ScalorType * source_forcx,
                  const ScalorType * source_forcy,
                  const ScalorType * source_forcz,
                  const IndexType * source_globalIndex,
                  const TypeType * source_type,
                  const ScalorType * source_mass,
                  const ScalorType * source_charge,
                  CoordType * coord,
                  CoordNoiType * coordNoi,
                  ScalorType * velox,
                  ScalorType * veloy,
                  ScalorType * veloz,
                  ScalorType * forcx,
                  ScalorType * forcy,
                  ScalorType * forcz,
                  IndexType * globalIndex,
                  TypeType * type,
                  ScalorType * mass,
                  ScalorType * charge)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType cellIdx = cellIndex[bid];
  IndexType fromid = tid + cellIdx * blockDim.x;
  IndexType toid = tid + cellStartIndex[bid];
  if (tid < numAtomInCell[cellIdx]){
    if (mask & MDDataItemMask_Coordinate){
      coord[toid] = source_coord[fromid];
    }
    if (mask & MDDataItemMask_CoordinateNoi){
      // bug fix: the original copied only the .x component, silently losing
      // the y and z entries; copy all three, matching the unpack kernels.
      coordNoi[toid].x = source_coordNoi[fromid].x;
      coordNoi[toid].y = source_coordNoi[fromid].y;
      coordNoi[toid].z = source_coordNoi[fromid].z;
    }
    if (mask & MDDataItemMask_Velocity){
      velox[toid] = source_velox[fromid];
      veloy[toid] = source_veloy[fromid];
      veloz[toid] = source_veloz[fromid];
    }
    if (mask & MDDataItemMask_Force){
      forcx[toid] = source_forcx[fromid];
      forcy[toid] = source_forcy[fromid];
      forcz[toid] = source_forcz[fromid];
    }
    if (mask & MDDataItemMask_GlobalIndex){
      globalIndex[toid] = source_globalIndex[fromid];
    }
    if (mask & MDDataItemMask_Type){
      type[toid] = source_type[fromid];
    }
    if (mask & MDDataItemMask_Mass){
      mass[toid] = source_mass[fromid];
    }
    if (mask & MDDataItemMask_Charge){
      charge[toid] = source_charge[fromid];
    }
  }
}
// Pack the bonded topology (bond / angle / dihedral tables) of the atoms in
// cell cellIndex[bid] into the compact destination arrays.  Companion of
// packDeviceMDData with the same cell/block mapping.  Per-atom topology rows
// are stored strided (source_bondTopStride on input, bondTopStride on
// output), hence the += stride loops.  Angles carry 2 neighbor entries per
// angle and dihedrals 3, so those neighbor loops run to maxNum* 2 and * 3.
__global__ void Parallel::CudaGlobal::
packDeviceMDData_bondTop (const IndexType * cellIndex,
                          const IndexType * numAtomInCell,
                          const IndexType * cellStartIndex,
                          const MDDataItemMask_t mask,
                          const IndexType * source_numBond,
                          const IndexType * source_bondIndex,
                          const IndexType * source_bondNeighbor_globalIndex,
                          const IndexType * source_numAngle,
                          const IndexType * source_angleIndex,
                          const IndexType * source_anglePosi ,
                          const IndexType * source_angleNeighbor_globalIndex,
                          const IndexType * source_numDihedral,
                          const IndexType * source_dihedralIndex,
                          const IndexType * source_dihedralPosi ,
                          const IndexType * source_dihedralNeighbor_globalIndex,
                          const IndexType source_bondTopStride,
                          const IndexType maxNumBond,
                          const IndexType maxNumAngle,
                          const IndexType maxNumDihedral,
                          IndexType * numBond,
                          IndexType * bondIndex,
                          IndexType * bondNeighbor_globalIndex,
                          IndexType * numAngle,
                          IndexType * angleIndex,
                          IndexType * anglePosi ,
                          IndexType * angleNeighbor_globalIndex,
                          IndexType * numDihedral,
                          IndexType * dihedralIndex,
                          IndexType * dihedralPosi ,
                          IndexType * dihedralNeighbor_globalIndex,
                          const IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType cellIdx = cellIndex[bid];
  IndexType fromid = tid + cellIdx * blockDim.x;
  IndexType toid = tid + cellStartIndex[bid];
  if (tid < numAtomInCell[cellIdx]){
    if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numBond[toid] = source_numBond[fromid];
      for (IndexType k = 0; k < maxNumBond; ++k){
        bondIndex[my_toid] = source_bondIndex[my_fromid];
        bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numAngle[toid] = source_numAngle[fromid];
      for (IndexType k = 0; k < maxNumAngle; ++k){
        angleIndex[my_toid] = source_angleIndex[my_fromid];
        anglePosi [my_toid] = source_anglePosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // angle neighbor list holds 2 entries per angle
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumAngle * 2; ++k){
        angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numDihedral[toid] = source_numDihedral[fromid];
      for (IndexType k = 0; k < maxNumDihedral; ++k){
        dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
        dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // dihedral neighbor list holds 3 entries per dihedral
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
        dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
  }
}
void Parallel::DeviceTransferPackage::
copyToHost (HostTransferPackage & hpkg) const
{
  // Mirror this device package into hpkg: first the packed particle data
  // (selected by myMask), then the per-cell index tables kept on this
  // package's host side.
  const DeviceMDData & ddata (*this);
  HostMDData & hdata (hpkg);
  ddata.copyToHost (hdata, myMask);
  if (hpkg.getMemSize() < numCell){
    hpkg.easyMallocMe (numCell * MemAllocExtension);
  }
  hpkg.getTotalNumCell() = numCell;
  hpkg.getMask() = myMask;
  for (IndexType i = 0; i < numCell; ++i){
    hpkg.getCellIndex()[i] = hcellIndex[i];
  }
  // cellStartIndex has numCell+1 entries (prefix-sum layout)
  for (IndexType i = 0; i <= numCell; ++i){
    hpkg.getCellStartIndex()[i] = hcellStartIndex[i];
  }
}
void Parallel::DeviceTransferPackage::
copyFromHost (const HostTransferPackage & hpkg)
{
  // Load this device package from hpkg: particle data first (per hpkg's
  // mask), then the per-cell index tables, which are staged on the host
  // mirrors and uploaded to the device.
  const HostMDData & hdata (hpkg);
  DeviceMDData & ddata (*this);
  myMask = hpkg.getMask();
  ddata.copyFromHost (hdata, myMask);
  if (memSize < hpkg.getTotalNumCell()){
    easyMallocMe (hpkg.getTotalNumCell() * MemAllocExtension);
  }
  numCell = hpkg.getTotalNumCell();
  for (IndexType i = 0; i < numCell; ++i){
    hcellIndex[i] = hpkg.getCellIndex()[i];
  }
  // cellStartIndex has numCell+1 entries (prefix-sum layout)
  for (IndexType i = 0; i <= numCell; ++i){
    hcellStartIndex[i] = hpkg.getCellStartIndex()[i];
  }
  cudaMemcpy (cellIndex, hcellIndex, sizeof(IndexType) * numCell,
              cudaMemcpyHostToDevice);
  cudaMemcpy (cellStartIndex, hcellStartIndex, sizeof(IndexType) * (numCell+1),
              cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceTransferPackage::copyFromHost cpy from host");
}
// Scatter the packed data back into ddata's cell-organized arrays,
// REPLACING the current contents of the target cells (their atom counts are
// overwritten with the packed counts).  One block per packed cell.
void Parallel::DeviceTransferPackage::
unpack_replace (DeviceCellListedMDData & ddata) const
{
  Parallel::CudaGlobal::unpackDeviceMDData_replace
      <<<numCell, Parallel::Interface::numThreadsInCell()>>> (
          cellIndex,
          cellStartIndex,
          myMask,
          this->dptr_coordinate(),
          this->dptr_coordinateNoi(),
          this->dptr_velocityX(),
          this->dptr_velocityY(),
          this->dptr_velocityZ(),
          this->dptr_forceX(),
          this->dptr_forceY(),
          this->dptr_forceZ(),
          this->dptr_globalIndex(),
          this->dptr_type(),
          this->dptr_mass(),
          this->dptr_charge(),
          ddata.numAtomInCell,
          ddata.dptr_coordinate(),
          ddata.dptr_coordinateNoi(),
          ddata.dptr_velocityX(),
          ddata.dptr_velocityY(),
          ddata.dptr_velocityZ(),
          ddata.dptr_forceX(),
          ddata.dptr_forceY(),
          ddata.dptr_forceZ(),
          ddata.dptr_globalIndex(),
          ddata.dptr_type(),
          ddata.dptr_mass(),
          ddata.dptr_charge());
  // second kernel restores the bond/angle/dihedral tables
  Parallel::CudaGlobal::unpackDeviceMDData_bondTop_replace
      <<<numCell, Parallel::Interface::numThreadsInCell()>>>(
          cellIndex,
          cellStartIndex,
          myMask,
          this->dptr_numBond(),
          this->dptr_bondIndex(),
          this->dptr_bondNeighbor_globalIndex(),
          this->dptr_numAngle(),
          this->dptr_angleIndex(),
          this->dptr_anglePosi(),
          this->dptr_angleNeighbor_globalIndex(),
          this->dptr_numDihedral(),
          this->dptr_dihedralIndex(),
          this->dptr_dihedralPosi(),
          this->dptr_dihedralNeighbor_globalIndex(),
          this->bondTopStride(),
          this->getMaxNumBond(),
          this->getMaxNumAngle(),
          this->getMaxNumDihedral(),
          ddata.dptr_numBond(),
          ddata.dptr_bondIndex(),
          ddata.dptr_bondNeighbor_globalIndex(),
          ddata.dptr_numAngle(),
          ddata.dptr_angleIndex(),
          ddata.dptr_anglePosi(),
          ddata.dptr_angleNeighbor_globalIndex(),
          ddata.dptr_numDihedral(),
          ddata.dptr_dihedralIndex(),
          ddata.dptr_dihedralPosi(),
          ddata.dptr_dihedralNeighbor_globalIndex(),
          ddata.bondTopStride());
  checkCUDAError ("DeviceTransferPackage::unpack_replace");
}
// Scatter the packed data back into ddata's cell-organized arrays, APPENDING
// the packed atoms after the atoms already present in each target cell.
// A snapshot of the current per-cell counts (oldNumAtomInCell) is taken
// first because the first kernel increments ddata's counts while the second
// kernel still needs the pre-append offsets.  Overflowing a cell's capacity
// is reported through err (mdErrorShortCellList).
void Parallel::DeviceTransferPackage::
unpack_add (DeviceCellListedMDData & ddata) const
{
  IndexType totalNumInCell = ddata.getNumCell().x * ddata.getNumCell().y * ddata.getNumCell().z;
  IndexType * oldNumAtomInCell;
  size_t size = totalNumInCell * sizeof(IndexType);
  cudaMalloc ((void**)&oldNumAtomInCell, size);
  checkCUDAError ("DeviceTransferPackage::unpack_add, malloc oldNumAtomInCell");
  cudaMemcpy (oldNumAtomInCell, ddata.dptr_numAtomInCell(), size,
              cudaMemcpyDeviceToDevice);
  checkCUDAError ("DeviceTransferPackage::unpack_add, copy oldNumAtomInCell");
  Parallel::CudaGlobal::unpackDeviceMDData_add
      <<<numCell, Parallel::Interface::numThreadsInCell()>>> (
          cellIndex,
          cellStartIndex,
          myMask,
          this->dptr_coordinate(),
          this->dptr_coordinateNoi(),
          this->dptr_velocityX(),
          this->dptr_velocityY(),
          this->dptr_velocityZ(),
          this->dptr_forceX(),
          this->dptr_forceY(),
          this->dptr_forceZ(),
          this->dptr_globalIndex(),
          this->dptr_type(),
          this->dptr_mass(),
          this->dptr_charge(),
          ddata.dptr_numAtomInCell(),
          ddata.dptr_coordinate(),
          ddata.dptr_coordinateNoi(),
          ddata.dptr_velocityX(),
          ddata.dptr_velocityY(),
          ddata.dptr_velocityZ(),
          ddata.dptr_forceX(),
          ddata.dptr_forceY(),
          ddata.dptr_forceZ(),
          ddata.dptr_globalIndex(),
          ddata.dptr_type(),
          ddata.dptr_mass(),
          ddata.dptr_charge(),
          err.ptr_de);
  // second kernel appends the bond/angle/dihedral tables, using the
  // pre-append counts to find each cell's insertion point
  Parallel::CudaGlobal::unpackDeviceMDData_bondTop_add
      <<<numCell, Parallel::Interface::numThreadsInCell()>>>(
          cellIndex,
          cellStartIndex,
          oldNumAtomInCell,
          myMask,
          this->dptr_numBond(),
          this->dptr_bondIndex(),
          this->dptr_bondNeighbor_globalIndex(),
          this->dptr_numAngle(),
          this->dptr_angleIndex(),
          this->dptr_anglePosi(),
          this->dptr_angleNeighbor_globalIndex(),
          this->dptr_numDihedral(),
          this->dptr_dihedralIndex(),
          this->dptr_dihedralPosi(),
          this->dptr_dihedralNeighbor_globalIndex(),
          this->bondTopStride(),
          this->getMaxNumBond(),
          this->getMaxNumAngle(),
          this->getMaxNumDihedral(),
          ddata.dptr_numBond(),
          ddata.dptr_bondIndex(),
          ddata.dptr_bondNeighbor_globalIndex(),
          ddata.dptr_numAngle(),
          ddata.dptr_angleIndex(),
          ddata.dptr_anglePosi(),
          ddata.dptr_angleNeighbor_globalIndex(),
          ddata.dptr_numDihedral(),
          ddata.dptr_dihedralIndex(),
          ddata.dptr_dihedralPosi(),
          ddata.dptr_dihedralNeighbor_globalIndex(),
          ddata.bondTopStride(),
          err.ptr_de);
  checkCUDAError ("DeviceTransferPackage::unpack_add");
  err.updateHost ();
  cudaFree (oldNumAtomInCell);
  err.check ("DeviceTransferPackage::unpack_add");
}
// Scatter packed atoms back into cell cellIndex[bid], overwriting the cell's
// previous contents.  The packed slice for this block is
// [cellStartIndex[bid], cellStartIndex[bid+1]); its length also becomes the
// cell's new atom count.  One block per packed cell.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_replace (const IndexType * cellIndex,
                            const IndexType * cellStartIndex,
                            const MDDataItemMask_t mask,
                            const CoordType * source_coord,
                            const CoordNoiType * source_coordNoi,
                            const ScalorType * source_velox,
                            const ScalorType * source_veloy,
                            const ScalorType * source_veloz,
                            const ScalorType * source_forcx,
                            const ScalorType * source_forcy,
                            const ScalorType * source_forcz,
                            const IndexType * source_globalIndex,
                            const TypeType * source_type,
                            const ScalorType * source_mass,
                            const ScalorType * source_charge,
                            IndexType * numAtomInCell,
                            CoordType * coord,
                            CoordNoiType * coordNoi,
                            ScalorType * velox,
                            ScalorType * veloy,
                            ScalorType * veloz,
                            ScalorType * forcx,
                            ScalorType * forcy,
                            ScalorType * forcz,
                            IndexType * globalIndex,
                            TypeType * type,
                            ScalorType * mass,
                            ScalorType * charge)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType cellIdx = cellIndex[bid];
  IndexType startIdx = cellStartIndex[bid];
  IndexType numAtomInThisCell = cellStartIndex[bid+1] - startIdx;
  IndexType toid = tid + cellIdx * blockDim.x;
  IndexType fromid = tid + startIdx;
  // a single thread records the cell's new population
  if (tid == blockDim.x-1){
    numAtomInCell[cellIdx] = numAtomInThisCell;
  }
  if (tid < numAtomInThisCell){
    if (mask & MDDataItemMask_Coordinate){
      coord[toid] = source_coord[fromid];
    }
    if (mask & MDDataItemMask_CoordinateNoi){
      coordNoi[toid].x = source_coordNoi[fromid].x;
      coordNoi[toid].y = source_coordNoi[fromid].y;
      coordNoi[toid].z = source_coordNoi[fromid].z;
    }
    if (mask & MDDataItemMask_Velocity){
      velox[toid] = source_velox[fromid];
      veloy[toid] = source_veloy[fromid];
      veloz[toid] = source_veloz[fromid];
    }
    if (mask & MDDataItemMask_Force){
      forcx[toid] = source_forcx[fromid];
      forcy[toid] = source_forcy[fromid];
      forcz[toid] = source_forcz[fromid];
    }
    if (mask & MDDataItemMask_GlobalIndex){
      globalIndex[toid] = source_globalIndex[fromid];
    }
    if (mask & MDDataItemMask_Type){
      type[toid] = source_type[fromid];
    }
    if (mask & MDDataItemMask_Mass){
      mass[toid] = source_mass[fromid];
    }
    if (mask & MDDataItemMask_Charge){
      charge[toid] = source_charge[fromid];
    }
  }
}
// Scatter the packed bond/angle/dihedral tables back into cell
// cellIndex[bid], overwriting the previous contents.  Companion of
// unpackDeviceMDData_replace with the same cell/block mapping.  Topology
// rows are strided (source_bondTopStride in, bondTopStride out); angles keep
// 2 neighbor entries per angle and dihedrals 3.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_bondTop_replace (const IndexType * cellIndex,
                                    const IndexType * cellStartIndex,
                                    const MDDataItemMask_t mask,
                                    const IndexType * source_numBond,
                                    const IndexType * source_bondIndex,
                                    const IndexType * source_bondNeighbor_globalIndex,
                                    const IndexType * source_numAngle,
                                    const IndexType * source_angleIndex,
                                    const IndexType * source_anglePosi ,
                                    const IndexType * source_angleNeighbor_globalIndex,
                                    const IndexType * source_numDihedral,
                                    const IndexType * source_dihedralIndex,
                                    const IndexType * source_dihedralPosi ,
                                    const IndexType * source_dihedralNeighbor_globalIndex,
                                    const IndexType source_bondTopStride,
                                    const IndexType maxNumBond,
                                    const IndexType maxNumAngle,
                                    const IndexType maxNumDihedral,
                                    IndexType * numBond,
                                    IndexType * bondIndex,
                                    IndexType * bondNeighbor_globalIndex,
                                    IndexType * numAngle,
                                    IndexType * angleIndex,
                                    IndexType * anglePosi ,
                                    IndexType * angleNeighbor_globalIndex,
                                    IndexType * numDihedral,
                                    IndexType * dihedralIndex,
                                    IndexType * dihedralPosi ,
                                    IndexType * dihedralNeighbor_globalIndex,
                                    const IndexType bondTopStride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType cellIdx = cellIndex[bid];
  IndexType startIdx = cellStartIndex[bid];
  IndexType numAtomInThisCell = cellStartIndex[bid+1] - startIdx;
  IndexType toid = tid + cellIdx * blockDim.x;
  IndexType fromid = tid + startIdx;
  if (tid < numAtomInThisCell){
    if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numBond[toid] = source_numBond[fromid];
      for (IndexType k = 0; k < maxNumBond; ++k){
        bondIndex[my_toid] = source_bondIndex[my_fromid];
        bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numAngle[toid] = source_numAngle[fromid];
      for (IndexType k = 0; k < maxNumAngle; ++k){
        angleIndex[my_toid] = source_angleIndex[my_fromid];
        anglePosi [my_toid] = source_anglePosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // angle neighbor list holds 2 entries per angle
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumAngle * 2; ++k){
        angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numDihedral[toid] = source_numDihedral[fromid];
      for (IndexType k = 0; k < maxNumDihedral; ++k){
        dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
        dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // dihedral neighbor list holds 3 entries per dihedral
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
        dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
  }
}
// Append packed atoms to cell cellIndex[bid], after the atoms already
// present.  The packed slice for this block is
// [cellStartIndex[bid], cellStartIndex[bid+1]).  Thread 0 bumps the cell's
// atom count; if the count would exceed the cell capacity (blockDim.x) the
// whole block aborts and mdErrorShortCellList is reported via ptr_de.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_add (const IndexType * cellIndex,
                        const IndexType * cellStartIndex,
                        const MDDataItemMask_t mask,
                        const CoordType * source_coord,
                        const CoordNoiType * source_coordNoi,
                        const ScalorType * source_velox,
                        const ScalorType * source_veloy,
                        const ScalorType * source_veloz,
                        const ScalorType * source_forcx,
                        const ScalorType * source_forcy,
                        const ScalorType * source_forcz,
                        const IndexType * source_globalIndex,
                        const TypeType * source_type,
                        const ScalorType * source_mass,
                        const ScalorType * source_charge,
                        IndexType * numAtomInCell,
                        CoordType * coord,
                        CoordNoiType * coordNoi,
                        ScalorType * velox,
                        ScalorType * veloy,
                        ScalorType * veloz,
                        ScalorType * forcx,
                        ScalorType * forcy,
                        ScalorType * forcz,
                        IndexType * globalIndex,
                        TypeType * type,
                        ScalorType * mass,
                        ScalorType * charge,
                        mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // stage the two consecutive start indexes through shared memory so the
  // whole block reads global memory only twice
  __shared__ volatile IndexType tmpbuff[2];
  if (tid < 2){
    tmpbuff[tid] = cellStartIndex[bid+tid];
  }
  __syncthreads();
  IndexType startIdx = tmpbuff[0];
  IndexType numAdded = tmpbuff[1] - startIdx;
  // IndexType startIdx = cellStartIndex[bid];
  // IndexType numAdded = cellStartIndex[bid+1] - startIdx;
  if (numAdded == 0) return;
  IndexType cellIdx = cellIndex[bid];
  // read the current population BEFORE thread 0 bumps it below
  IndexType alreadyInCell = numAtomInCell[cellIdx];
  IndexType toid = tid + cellIdx * blockDim.x + alreadyInCell;
  IndexType fromid = tid + startIdx;
  // __shared__ IndexType numAtomInThisCell;
  __syncthreads();
  __shared__ volatile bool failed ;
  if (tid == 0){
    // bump the cell count; flag an overflow if the cell cannot hold the
    // appended atoms
    if (((numAtomInCell[cellIdx] += numAdded)) > blockDim.x &&
        ptr_de != NULL){
      *ptr_de = mdErrorShortCellList;
      failed = true;
    }
    else {
      failed = false;
    }
  }
  __syncthreads();
  if (failed) return;
  if (tid < numAdded){
    if (mask & MDDataItemMask_Coordinate){
      coord[toid] = source_coord[fromid];
    }
    if (mask & MDDataItemMask_CoordinateNoi){
      coordNoi[toid].x = source_coordNoi[fromid].x;
      coordNoi[toid].y = source_coordNoi[fromid].y;
      coordNoi[toid].z = source_coordNoi[fromid].z;
    }
    if (mask & MDDataItemMask_Velocity){
      velox[toid] = source_velox[fromid];
      veloy[toid] = source_veloy[fromid];
      veloz[toid] = source_veloz[fromid];
    }
    if (mask & MDDataItemMask_Force){
      forcx[toid] = source_forcx[fromid];
      forcy[toid] = source_forcy[fromid];
      forcz[toid] = source_forcz[fromid];
    }
    if (mask & MDDataItemMask_GlobalIndex){
      globalIndex[toid] = source_globalIndex[fromid];
    }
    if (mask & MDDataItemMask_Type){
      type[toid] = source_type[fromid];
    }
    if (mask & MDDataItemMask_Mass){
      mass[toid] = source_mass[fromid];
    }
    if (mask & MDDataItemMask_Charge){
      charge[toid] = source_charge[fromid];
    }
  }
}
// Append the packed bond/angle/dihedral tables to cell cellIndex[bid].
// oldNumAtomInCell holds the cell populations BEFORE unpackDeviceMDData_add
// ran (that kernel increments the live counts), so the insertion offset here
// matches where the atoms themselves were appended.  Strided row layout and
// the 2-per-angle / 3-per-dihedral neighbor factors as in the pack kernels.
__global__ void Parallel::CudaGlobal::
unpackDeviceMDData_bondTop_add (const IndexType * cellIndex,
                                const IndexType * cellStartIndex,
                                const IndexType * oldNumAtomInCell,
                                const MDDataItemMask_t mask,
                                const IndexType * source_numBond,
                                const IndexType * source_bondIndex,
                                const IndexType * source_bondNeighbor_globalIndex,
                                const IndexType * source_numAngle,
                                const IndexType * source_angleIndex,
                                const IndexType * source_anglePosi ,
                                const IndexType * source_angleNeighbor_globalIndex,
                                const IndexType * source_numDihedral,
                                const IndexType * source_dihedralIndex,
                                const IndexType * source_dihedralPosi ,
                                const IndexType * source_dihedralNeighbor_globalIndex,
                                const IndexType source_bondTopStride,
                                const IndexType maxNumBond,
                                const IndexType maxNumAngle,
                                const IndexType maxNumDihedral,
                                IndexType * numBond,
                                IndexType * bondIndex,
                                IndexType * bondNeighbor_globalIndex,
                                IndexType * numAngle,
                                IndexType * angleIndex,
                                IndexType * anglePosi ,
                                IndexType * angleNeighbor_globalIndex,
                                IndexType * numDihedral,
                                IndexType * dihedralIndex,
                                IndexType * dihedralPosi ,
                                IndexType * dihedralNeighbor_globalIndex,
                                const IndexType bondTopStride,
                                mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // stage the two consecutive start indexes through shared memory
  __shared__ volatile IndexType tmpbuff[2];
  if (tid < 2){
    tmpbuff[tid] = cellStartIndex[bid+tid];
  }
  __syncthreads();
  IndexType startIdx = tmpbuff[0];
  IndexType numAdded = tmpbuff[1] - startIdx;
  if (numAdded == 0) return;
  IndexType cellIdx = cellIndex[bid];
  IndexType toid = tid + cellIdx * blockDim.x + oldNumAtomInCell[cellIdx];
  IndexType fromid = tid + startIdx;
  if (tid < numAdded){
    if ((mask & MDDataItemMask_Bond) && maxNumBond != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numBond[toid] = source_numBond[fromid];
      for (IndexType k = 0; k < maxNumBond; ++k){
        bondIndex[my_toid] = source_bondIndex[my_fromid];
        bondNeighbor_globalIndex[my_toid] = source_bondNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Angle) && maxNumAngle != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numAngle[toid] = source_numAngle[fromid];
      for (IndexType k = 0; k < maxNumAngle; ++k){
        angleIndex[my_toid] = source_angleIndex[my_fromid];
        anglePosi [my_toid] = source_anglePosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // angle neighbor list holds 2 entries per angle
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumAngle * 2; ++k){
        angleNeighbor_globalIndex[my_toid] = source_angleNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
    if ((mask & MDDataItemMask_Dihedral) && maxNumDihedral != 0) {
      IndexType my_fromid = fromid;
      IndexType my_toid = toid;
      numDihedral[toid] = source_numDihedral[fromid];
      for (IndexType k = 0; k < maxNumDihedral; ++k){
        dihedralIndex[my_toid] = source_dihedralIndex[my_fromid];
        dihedralPosi [my_toid] = source_dihedralPosi [my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
      // dihedral neighbor list holds 3 entries per dihedral
      my_fromid = fromid;
      my_toid = toid;
      for (IndexType k = 0; k < maxNumDihedral * 3; ++k){
        dihedralNeighbor_globalIndex[my_toid] = source_dihedralNeighbor_globalIndex[my_fromid];
        my_fromid += source_bondTopStride;
        my_toid += bondTopStride;
      }
    }
  }
}
void Parallel::DeviceCellListedMDData::
copyToHost (HostCellListedMDData & hdata,
            const MDDataItemMask_t mask) const
{
  // Export the particle data selected by mask, then mirror the cell-list
  // geometry and the per-cell atom counts into the host structure hdata,
  // growing its cell storage when needed.
  const DeviceMDData & ddata (*this);
  ddata.copyToHost (hdata, mask);
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  if (hdata.HostCellListedMDData::memSize < totalNumCell){
    hdata.easyReallocCell (totalNumCell * MemAllocExtension);
  }
  hdata.rlist = rlist;
  hdata.devideLevel = devideLevel;
  hdata.numCell.x = numCell.x;
  hdata.numCell.y = numCell.y;
  hdata.numCell.z = numCell.z;
  hdata.frameLow.x = frameLow.x;
  hdata.frameLow.y = frameLow.y;
  hdata.frameLow.z = frameLow.z;
  hdata.frameUp.x = frameUp.x;
  hdata.frameUp.y = frameUp.y;
  hdata.frameUp.z = frameUp.z;
  cudaMemcpy (hdata.numAtomInCell, numAtomInCell,
              totalNumCell * sizeof (IndexType),
              cudaMemcpyDeviceToHost);
  checkCUDAError ("DeviceCellListedMDData::copyToHost copy numAtomInCell");
}
// Mirror a host-side cell list into this device object: atom data selected
// by `mask`, the cell-grid geometry fields, and the per-cell atom counts.
void Parallel::DeviceCellListedMDData::
copyFromHost (const HostCellListedMDData & hdata,
              const MDDataItemMask_t mask)
{
  // Atom-level arrays are copied by the DeviceMDData base implementation.
  DeviceMDData & ddata(*this);
  ddata.copyFromHost (hdata, mask);
  // Cell-list bookkeeping: cutoff radius, division level, grid dimensions
  // and the local spatial frame of this processor.
  rlist = hdata.rlist;
  devideLevel = hdata.devideLevel;
  numCell.x = hdata.numCell.x;
  numCell.y = hdata.numCell.y;
  numCell.z = hdata.numCell.z;
  frameUp.x = hdata.frameUp.x;
  frameUp.y = hdata.frameUp.y;
  frameUp.z = hdata.frameUp.z;
  frameLow.x = hdata.frameLow.x;
  frameLow.y = hdata.frameLow.y;
  frameLow.z = hdata.frameLow.z;
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  size_t size = totalNumCell * sizeof (IndexType);
  // Grow the device-side cell buffers if they cannot hold all cells.
  if (memSize < totalNumCell){
    easyMallocCell (totalNumCell * MemAllocExtension);
  }
  cudaMemcpy (numAtomInCell, hdata.numAtomInCell, size,
              cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceCellListedMDData::copyFromHost copy numAtomInCell");
}
// Mirror another device-resident cell list into this object: atom data
// selected by `mask`, the cell geometry, and the per-cell atom counts
// (device-to-device copy).
void Parallel::DeviceCellListedMDData::
copyFromDevice (const DeviceCellListedMDData & ddata,
                const MDDataItemMask_t mask)
{
  // Atom-level arrays are handled by the DeviceMDData base implementation.
  DeviceMDData & me(*this);
  me.copyFromDevice (ddata, mask);
  rlist = ddata.rlist;
  devideLevel = ddata.devideLevel;
  numCell.x = ddata.numCell.x;
  numCell.y = ddata.numCell.y;
  numCell.z = ddata.numCell.z;
  frameUp.x = ddata.frameUp.x;
  frameUp.y = ddata.frameUp.y;
  frameUp.z = ddata.frameUp.z;
  frameLow.x = ddata.frameLow.x;
  frameLow.y = ddata.frameLow.y;
  frameLow.z = ddata.frameLow.z;
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  size_t size = totalNumCell * sizeof (IndexType);
  // Grow this object's cell buffers if the source has more cells.
  if (memSize < totalNumCell){
    easyMallocCell (totalNumCell * MemAllocExtension);
  }
  cudaMemcpy (numAtomInCell, ddata.numAtomInCell, size,
              cudaMemcpyDeviceToDevice);
  checkCUDAError ("DeviceCellListedMDData::copyFromDevice copy numAtomInCell");
}
// Reset the atom counter of every cell listed in `subList` to zero.
// The indices are staged through a host buffer into a temporary device
// array, then one kernel thread per listed cell performs the reset.
// Throws MDExcptFailedMallocOnHost if the host staging buffer cannot be
// allocated.
void Parallel::DeviceCellListedMDData::
clearData (const SubCellList & subList)
{
  const IndexType num = subList.size();
  // Nothing to clear.  This also avoids launching a kernel with a
  // zero-sized grid, which CUDA rejects as an invalid configuration.
  if (num == 0) return;

  size_t size = sizeof(IndexType) * num;
  IndexType * tmpList = (IndexType * ) malloc (size);
  if (tmpList == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::clearData",
                                     "tmpList", size);
  }
  IndexType * deviceList;
  cudaMalloc ((void**)&deviceList, size);
  checkCUDAError ("DeviceCellListedMDData::clearData, malloc deviceList");
  // Stage the cell indices on the host, then push them to the device.
  for (IndexType i = 0; i < num; ++i){
    tmpList[i] = subList[i];
  }
  cudaMemcpy (deviceList, tmpList, size, cudaMemcpyHostToDevice);
  freeAPointer ((void**)&tmpList);
  // One thread per listed cell zeroes that cell's atom counter.
  Parallel::CudaGlobal::clearCellListData
      <<<(num + DefaultNThreadPerBlock -1) / DefaultNThreadPerBlock,
      DefaultNThreadPerBlock>>> (
          deviceList,
          num,
          numAtomInCell);
  checkCUDAError ("DeviceCellListedMDData::clearData, clearCellListData");
  cudaFree (deviceList);
  checkCUDAError ("DeviceCellListedMDData::clearData, free deviceList");
}
// Zero the atom counter of every cell whose index appears in `deviceList`.
// One thread handles one list entry; threads past `num` do nothing.
void __global__
Parallel::CudaGlobal::
clearCellListData (const IndexType * deviceList,
                   IndexType num,
                   IndexType * numAtomInCell)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType entry   = threadIdx.x + blockId * blockDim.x;
  if (entry >= num) return;
  numAtomInCell[deviceList[entry]] = 0;
}
// One thread block per cell: fold each resident atom's coordinate back into
// the rectangular global box, one dimension at a time, adjusting the image
// counters in coordNoi (moveParticleToBox_1image is a project helper;
// presumably it shifts by at most one box length -- confirm at its
// definition).
__global__ void Parallel::CudaGlobal::
normalizeSystem_CellListed (RectangularBox box,
                            const IndexType * numAtomInCell,
                            CoordType * coord,
                            CoordNoiType * coordNoi)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;  // cell index
  IndexType tid = threadIdx.x;                          // atom slot in cell
  IndexType ii = tid + bid * blockDim.x;                // global atom slot
  // Only slots that actually hold an atom are processed.
  if (tid < numAtomInCell[bid]) {
    RectangularBoxGeometry::moveParticleToBox_1image (
        box.size.x, &(coord[ii].x), &(coordNoi[ii].x));
    RectangularBoxGeometry::moveParticleToBox_1image (
        box.size.y, &(coord[ii].y), &(coordNoi[ii].y));
    RectangularBoxGeometry::moveParticleToBox_1image (
        box.size.z, &(coord[ii].z), &(coordNoi[ii].z));
  }
}
// Fold all coordinates back into the global box: one block per cell, with
// blockDim equal to the per-cell atom capacity reported by the interface.
void Parallel::DeviceCellListedMDData::
applyPeriodicBondaryCondition ()
{
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
  Parallel::CudaGlobal::normalizeSystem_CellListed
      <<<totalNumCell, numThreadsInCell>>> (
          globalBox,
          numAtomInCell,
          coord,
          coordNoi);
  checkCUDAError ("DeviceCellListedMDData::applyPeriodicBondaryCondition");
}
// For ranks on a global domain boundary, shift the atoms that sit in ghost
// cells (the outermost `divideLevel` cell layers of the local grid) by a
// whole global box length so they represent the correct periodic image, and
// adjust the image counters in coordNoi to compensate.  One block per cell;
// interior ranks and interior cells are untouched.
__global__ void Parallel::CudaGlobal::
normalizeSystemOnGhost_CellListed (RectangularBox box,
                                   const IndexType * numAtomInCell,
                                   const IntVectorType numCell,
                                   const IndexType divideLevel,
                                   const int nx,
                                   const int ny,
                                   const int nz,
                                   const int rankx,
                                   const int ranky,
                                   const int rankz,
                                   CoordType * coord,
                                   CoordNoiType * coordNoi)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;  // cell index
  IndexType tid = threadIdx.x;                          // atom slot in cell
  IndexType ii = tid + bid * blockDim.x;                // global atom slot
  IndexType this_numAtomInCell = numAtomInCell[bid];
  // Decompose the flat cell index into 3D grid coordinates.
  IndexType ix, iy, iz;
  Parallel::CudaDevice::D1toD3 (numCell, bid, ix, iy, iz);
  // Low-x ghost layer of the first processor row: shift up one box length.
  if (rankx == 0 && ix < divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].x += box.size.x;
      coordNoi[ii].x -= 1;
    }
  }
  // High-x ghost layer of the last processor row: shift down one box length.
  if (rankx == nx - 1 && ix >= numCell.x - divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].x -= box.size.x;
      coordNoi[ii].x += 1;
    }
  }
  // Same treatment for the y dimension.
  if (ranky == 0 && iy < divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].y += box.size.y;
      coordNoi[ii].y -= 1;
    }
  }
  if (ranky == ny - 1 && iy >= numCell.y - divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].y -= box.size.y;
      coordNoi[ii].y += 1;
    }
  }
  // Same treatment for the z dimension.
  if (rankz == 0 && iz < divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].z += box.size.z;
      coordNoi[ii].z -= 1;
    }
  }
  if (rankz == nz - 1 && iz >= numCell.z - divideLevel){
    if (tid < this_numAtomInCell){
      coord[ii].z -= box.size.z;
      coordNoi[ii].z += 1;
    }
  }
  // if (tid < numAtomInCell[bid]) {
  // RectangularBoxGeometry::moveParticleToBox_1image (
  // box.size.x, &(coord[ii].x), &(coordNoi[ii].x));
  // RectangularBoxGeometry::moveParticleToBox_1image (
  // box.size.y, &(coord[ii].y), &(coordNoi[ii].y));
  // RectangularBoxGeometry::moveParticleToBox_1image (
  // box.size.z, &(coord[ii].z), &(coordNoi[ii].z));
  // }
}
// Apply periodic wrapping only to the ghost cells of ranks that sit on a
// global domain boundary.  One block per cell; the kernel decides per cell
// whether a shift is needed.
void Parallel::DeviceCellListedMDData::
applyPeriodicBondaryConditionOnGhostCells ()
{
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  IndexType numThreadsInCell = Parallel::Interface::numThreadsInCell ();
  int Nx, Ny, Nz;
  Parallel::Interface::numProcDim (Nx, Ny, Nz);
  int rankx, ranky, rankz;
  Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(),
                                        rankx, ranky, rankz);
  Parallel::CudaGlobal::normalizeSystemOnGhost_CellListed
      <<<totalNumCell, numThreadsInCell>>> (
          globalBox,
          numAtomInCell,
          numCell,
          devideLevel,
          Nx, Ny, Nz,
          rankx, ranky, rankz,
          coord,
          coordNoi);
  // Bug fix: the error label previously named applyPeriodicBondaryCondition,
  // which made failures here indistinguishable from that function's.
  checkCUDAError ("DeviceCellListedMDData::applyPeriodicBondaryConditionOnGhostCells");
}
// (Re)allocate the device arrays of this cell relation for `totalNumCell_`
// cells with at most `MaxNeiPerCell_` neighbors per cell.  Any previous
// allocation is released first.
void Parallel::DeviceCellRelation::
easyMalloc (const IndexType & totalNumCell_,
            const IndexType & MaxNeiPerCell_)
{
  if (malloced){
    clear ();
  }
  totalNumCell = totalNumCell_;
  MaxNeiPerCell = MaxNeiPerCell_;
  // numNeighbor[c]: number of neighbor cells recorded for cell c.
  cudaMalloc ((void**)&numNeighbor, totalNumCell * sizeof(IndexType));
  // neighborCellIndex / neighborShiftNoi: row-major [cell][neighbor] tables
  // with row stride MaxNeiPerCell.
  cudaMalloc ((void**)&neighborCellIndex,
              totalNumCell * MaxNeiPerCell * sizeof (IndexType));
  cudaMalloc ((void**)&neighborShiftNoi,
              totalNumCell * MaxNeiPerCell * sizeof (CoordNoiType));
  checkCUDAError ("DeviceCellRelation::easyMalloc");
  malloced = true;
}
// Release the device-side relation arrays, if any were allocated.
void Parallel::DeviceCellRelation::
clear ()
{
  if (! malloced) return;
  cudaFree (numNeighbor);
  cudaFree (neighborCellIndex);
  cudaFree (neighborShiftNoi);
  malloced = false;
}
// Release all device memory owned by this relation.
Parallel::DeviceCellRelation::
~DeviceCellRelation ()
{
  clear();
}
// Build the full cell-neighbor relation for `list`.  A cell can have at most
// (2*devideLevel+1)^3 neighbors; the kernel is launched with one
// single-thread block per cell, each filling its own neighbor row.
void Parallel::DeviceCellRelation::
rebuild (const DeviceCellListedMDData & list)
{
  ptr_list = &list;
  IntVectorType numCell = list.getNumCell ();
  totalNumCell = numCell.x * numCell.y * numCell.z;
  MaxNeiPerCell = 2 * list.getDevideLevel() + 1;
  MaxNeiPerCell = MaxNeiPerCell * MaxNeiPerCell * MaxNeiPerCell;
  int Nx, Ny, Nz;
  Parallel::Interface::numProcDim (Nx, Ny, Nz);
  easyMalloc (totalNumCell, MaxNeiPerCell);
  int rankx, ranky, rankz;
  Parallel::Interface::rankToCartCoord (Parallel::Interface::myRank(), rankx, ranky, rankz);
  Parallel::CudaGlobal::buildCellNeighborhood
      <<<toGridDim(totalNumCell), 1>>> (
          numCell,
          list.getDevideLevel(),
          list.getRlist(),
          list.getGlobalBoxSize(),
          rankx, ranky, rankz,
          Nx, Ny, Nz,
          numNeighbor,
          neighborCellIndex,
          neighborShiftNoi,
          MaxNeiPerCell);
  checkCUDAError ("DeviceCellRelation::build, buildCellNeighborhood");
}
// Build the neighbor-cell list of every non-ghost cell (overload without
// periodic shift output).  One block per cell; only thread 0 does the work.
// A dimension with numCell == 3 consists of one real cell plus two ghost
// layers and is flagged oneCell* for special handling.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
                       const IndexType devideLevel,
                       const ScalorType rlist,
                       const HostVectorType boxSize,
                       IndexType * numNeighbor,
                       IndexType * neighborCellIndex,
                       const IndexType stride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;  // cell index
  IndexType tid = threadIdx.x;
  __shared__ bool stop;
  // Every thread writes the same value; benign redundant store.
  numNeighbor[bid] = 0;
  int centerx, centery, centerz;
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (numCell.x == 3) oneCellX = true;
  if (numCell.y == 3) oneCellY = true;
  if (numCell.z == 3) oneCellZ = true;
  // Ghost cells (outermost devideLevel layers in any dimension) get no
  // neighbor list: thread 0 raises `stop` and the whole block bails out.
  if (tid == 0){
    stop = false;
    Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
    if (oneCellX){
      if (centerx == 0 || centerx == 2){
        stop = true;
      }
    }
    else {
      if (centerx < devideLevel || centerx >= numCell.x - devideLevel){
        stop = true;
      }
    }
    if (oneCellY){
      if (centery == 0 || centery == 2){
        stop = true;
      }
    }
    else {
      if (centery < devideLevel || centery >= numCell.y - devideLevel){
        stop = true;
      }
    }
    if (oneCellZ){
      if (centerz == 0 || centerz == 2){
        stop = true;
      }
    }
    else {
      if (centerz < devideLevel || centerz >= numCell.z - devideLevel){
        stop = true;
      }
    }
  }
  __syncthreads();
  if (stop) return;
  if (tid == 0){
    // Candidate window: +-devideLevel cells (only the one real neighbor pair
    // in a oneCell* dimension).
    int lowerX (-devideLevel);
    int lowerY (-devideLevel);
    int lowerZ (-devideLevel);
    if (oneCellX) lowerX = -1;
    if (oneCellY) lowerY = -1;
    if (oneCellZ) lowerZ = -1;
    int upperX (devideLevel+1);
    int upperY (devideLevel+1);
    int upperZ (devideLevel+1);
    if (oneCellX) upperX = 2;
    if (oneCellY) upperY = 2;
    if (oneCellZ) upperZ = 2;
    // Cell edge lengths; the ghost layers are excluded from the real-cell
    // count (numCell - 2*devideLevel).
    ScalorType scalorx, scalory, scalorz;
    oneCellX ? scalorx = boxSize.x :
        scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
    oneCellY ? scalory = boxSize.y :
        scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
    oneCellZ ? scalorz = boxSize.z :
        scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
    ScalorType rlist2 = rlist * rlist;
    for (int ix = lowerX; ix < upperX; ++ix){
      for (int iy = lowerY; iy < upperY; ++iy){
        for (int iz = lowerZ; iz < upperZ; ++iz){
          int myx = ix + int(centerx);
          int myy = iy + int(centery);
          int myz = iz + int(centerz);
          // Closest approach of the two cells on the cell lattice: minimum
          // over the 27 corner offsets of the squared center distance.
          ScalorType min = 1e9;
#pragma unroll 27
          for (int dx = -1; dx <= 1; ++dx){
            for (int dy = -1; dy <= 1; ++dy){
              for (int dz = -1; dz <= 1; ++dz){
                ScalorType diffx ((-centerx + myx + dx) * scalorx);
                ScalorType diffy ((-centery + myy + dy) * scalory);
                ScalorType diffz ((-centerz + myz + dz) * scalorz);
                // shortestImage (box, &diffx, &diffy, &diffz);
                ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
                if (diff2 < min){
                  min = diff2;
                }
              }
            }
          }
          // Record the candidate if it can contain atoms within rlist.
          if (min < rlist2){
            IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell, myx, myy, myz);
            neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
          }
        }
      }
    }
  }
}
// Build the neighbor list of every non-ghost cell, including the periodic
// image shift (neighborShiftNoi) each neighbor carries when this processor
// sits on a global boundary.  One block per cell; only thread 0 works.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
                       const IndexType devideLevel,
                       const ScalorType rlist,
                       const HostVectorType globalBoxSize,
                       const int rankx,
                       const int ranky,
                       const int rankz,
                       const int nProcDimx,
                       const int nProcDimy,
                       const int nProcDimz,
                       IndexType * numNeighbor,
                       IndexType * neighborCellIndex,
                       CoordNoiType * neighborShiftNoi,
                       const IndexType stride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;  // cell index
  IndexType tid = threadIdx.x;
  __shared__ bool stop;
  numNeighbor[bid] = 0;
  int centerx, centery, centerz;
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (numCell.x == 3) oneCellX = true;
  if (numCell.y == 3) oneCellY = true;
  if (numCell.z == 3) oneCellZ = true;
  // Bug fix: the per-rank box size is real-valued.  It was previously held
  // in a dim3, whose unsigned-int components truncated the box dimensions
  // and corrupted the cell-distance test below (the sibling overloads use
  // HostVectorType here).
  HostVectorType boxSize;
  if (tid == 0){
    boxSize.x = globalBoxSize.x / nProcDimx;
    boxSize.y = globalBoxSize.y / nProcDimy;
    boxSize.z = globalBoxSize.z / nProcDimz;
    stop = false;
    Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
    // Ghost cells (outermost devideLevel layers) get no neighbor list.
    if (oneCellX){
      if (centerx == 0 || centerx == 2){
        stop = true;
      }
    }
    else {
      if (centerx < devideLevel || centerx >= numCell.x - devideLevel){
        stop = true;
      }
    }
    if (oneCellY){
      if (centery == 0 || centery == 2){
        stop = true;
      }
    }
    else {
      if (centery < devideLevel || centery >= numCell.y - devideLevel){
        stop = true;
      }
    }
    if (oneCellZ){
      if (centerz == 0 || centerz == 2){
        stop = true;
      }
    }
    else {
      if (centerz < devideLevel || centerz >= numCell.z - devideLevel){
        stop = true;
      }
    }
  }
  __syncthreads();
  if (stop) return;
  if (tid == 0){
    // Candidate window: +-devideLevel cells around the center.
    int lowerX (-devideLevel);
    int lowerY (-devideLevel);
    int lowerZ (-devideLevel);
    if (oneCellX) lowerX = -1;
    if (oneCellY) lowerY = -1;
    if (oneCellZ) lowerZ = -1;
    int upperX (devideLevel+1);
    int upperY (devideLevel+1);
    int upperZ (devideLevel+1);
    if (oneCellX) upperX = 2;
    if (oneCellY) upperY = 2;
    if (oneCellZ) upperZ = 2;
    // Cell edge lengths; ghost layers excluded from the real-cell count.
    ScalorType scalorx, scalory, scalorz;
    oneCellX ? scalorx = boxSize.x :
        scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
    oneCellY ? scalory = boxSize.y :
        scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
    oneCellZ ? scalorz = boxSize.z :
        scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
    ScalorType rlist2 = rlist * rlist;
    CoordNoiType myshift ;
    for (int ix = lowerX; ix < upperX; ++ix){
      for (int iy = lowerY; iy < upperY; ++iy){
        for (int iz = lowerZ; iz < upperZ; ++iz){
          int myx = ix + int(centerx);
          int myy = iy + int(centery);
          int myz = iz + int(centerz);
          // Periodic image shift of the neighbor when this rank touches the
          // corresponding global boundary.
          myshift.x = myshift.y = myshift.z = 0;
          if (rankx == 0 && myx < devideLevel) {
            myshift.x = - 1;
          }
          else if (rankx == nProcDimx - 1 && myx >= int(numCell.x - devideLevel)){
            myshift.x = 1;
          }
          if (ranky == 0 && myy < devideLevel) {
            myshift.y = - 1;
          }
          else if (ranky == nProcDimy - 1 && myy >= int(numCell.y - devideLevel)){
            myshift.y = 1;
          }
          if (rankz == 0 && myz < devideLevel) {
            myshift.z = - 1;
          }
          else if (rankz == nProcDimz - 1 && myz >= int(numCell.z - devideLevel)){
            myshift.z = 1;
          }
          // Closest approach of the two cells on the cell lattice.
          ScalorType min = 1e9;
#pragma unroll 27
          for (int dx = -1; dx <= 1; ++dx){
            for (int dy = -1; dy <= 1; ++dy){
              for (int dz = -1; dz <= 1; ++dz){
                ScalorType diffx ((-centerx + myx + dx) * scalorx);
                ScalorType diffy ((-centery + myy + dy) * scalory);
                ScalorType diffz ((-centerz + myz + dz) * scalorz);
                // shortestImage (box, &diffx, &diffy, &diffz);
                ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
                if (diff2 < min){
                  min = diff2;
                }
              }
            }
          }
          if (min < rlist2){
            IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell, myx, myy, myz);
            neighborShiftNoi [(numNeighbor[bid] ) + bid * stride] = myshift;
            neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
          }
        }
      }
    }
  }
}
// Copy the relation tables (neighbor counts, neighbor indices, and periodic
// shift records) from device to host.  The host buffers are (re)allocated
// to totalNumCell x MaxNeiPerCell first.
void Parallel::DeviceCellRelation::
copyToHost (HostCellRelation & hrelation)
{
  hrelation.easyMalloc (totalNumCell, MaxNeiPerCell);
  cudaMemcpy (hrelation.cptr_numNeighborCell(),
              numNeighbor,
              sizeof(IndexType) * totalNumCell,
              cudaMemcpyDeviceToHost);
  cudaMemcpy (hrelation.cptr_neighborCellIndex(),
              neighborCellIndex,
              sizeof(IndexType) * totalNumCell * MaxNeiPerCell,
              cudaMemcpyDeviceToHost);
  cudaMemcpy (hrelation.cptr_neighborShiftNoi(),
              neighborShiftNoi,
              sizeof(CoordNoiType) * totalNumCell * MaxNeiPerCell,
              cudaMemcpyDeviceToHost);
  checkCUDAError ("DeviceCellRelation::copyToHost");
}
// Build the relation restricted to directed pairs (cell in sub0 -> cell in
// sub1): for each sub0 cell, its neighbor row is filled with the sub1 cells
// within rlist.  Both sub-lists are staged through host buffers into
// temporary device arrays; all neighbor counters are zeroed first.
void Parallel::DeviceCellRelation::
rebuild (const DeviceCellListedMDData & list,
         const SubCellList & sub0,
         const SubCellList & sub1)
{
  IndexType * tmpHost;
  IndexType * deviceSub0;
  IndexType * deviceSub1;
  size_t size0 = sizeof (IndexType) * sub0.size();
  size_t size1 = sizeof (IndexType) * sub1.size();
  // Stage sub0 on the host and copy it to the device.
  tmpHost = (IndexType *) malloc (size0);
  if (tmpHost == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::build",
                                     "tmpHost", size0);
  }
  cudaMalloc ((void**)&deviceSub0, size0);
  checkCUDAError ("DeviceCellListedMDData::build, malloc deviceSub0");
  for (IndexType i = 0; i < sub0.size(); ++i){
    tmpHost[i] = sub0[i];
  }
  cudaMemcpy (deviceSub0, tmpHost, size0, cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceCellListedMDData::build, copy to deviceSub0");
  free (tmpHost);
  // Same staging for sub1.
  tmpHost = (IndexType *) malloc (size1);
  if (tmpHost == NULL){
    throw MDExcptFailedMallocOnHost ("DeviceCellListedMDData::build",
                                     "tmpHost", size1);
  }
  cudaMalloc ((void**)&deviceSub1, size1);
  checkCUDAError ("DeviceCellListedMDData::build, malloc deviceSub1");
  for (IndexType i = 0; i < sub1.size(); ++i){
    tmpHost[i] = sub1[i];
  }
  cudaMemcpy (deviceSub1, tmpHost, size1, cudaMemcpyHostToDevice);
  checkCUDAError ("DeviceCellListedMDData::build, copy to deviceSub1");
  free (tmpHost);
  ptr_list = & list;
  IntVectorType numCell = list.getNumCell ();
  totalNumCell = numCell.x * numCell.y * numCell.z;
  // Upper bound on neighbors per cell: (2*devideLevel+1)^3.
  MaxNeiPerCell = 2 * list.getDevideLevel() + 1;
  MaxNeiPerCell = MaxNeiPerCell * MaxNeiPerCell * MaxNeiPerCell;
  int Nx, Ny, Nz;
  Parallel::Interface::numProcDim (Nx, Ny, Nz);
  easyMalloc (totalNumCell, MaxNeiPerCell);
  int rankx, ranky, rankz;
  Parallel::Interface::rankToCartCoord
      (Parallel::Interface::myRank(), rankx, ranky, rankz);
  // NOTE(review): this error label is stale here -- the kernels it names are
  // launched below this call, not above it.
  checkCUDAError ("DeviceCellRelation::build, buildCellNeighborhood");
  // Reset every cell's neighbor counter before accumulating.
  Parallel::Auxiliary::setValue
      <<<toGridDim(totalNumCell / DefaultNThreadPerBlock + 1),
      DefaultNThreadPerBlock>>> (
          numNeighbor, totalNumCell, IndexType(0));
  // One (single-thread) block per sub0 cell fills its neighbor row.
  Parallel::CudaGlobal::buildCellNeighborhood
      <<<sub0.size(), 1>>> (
          numCell,
          list.getDevideLevel(),
          list.getRlist(),
          list.getGlobalBoxSize(),
          rankx, ranky, rankz,
          Nx, Ny, Nz,
          deviceSub0,
          sub0.size(),
          deviceSub1,
          sub1.size(),
          numNeighbor,
          neighborCellIndex,
          neighborShiftNoi,
          MaxNeiPerCell);
  cudaFree(deviceSub0);
  cudaFree(deviceSub1);
  checkCUDAError ("DeviceCellListedMDData::build, cuda free");
}
// Build the neighbor relation restricted to directed pairs
// (cell in subList0 -> cell in subList1).  One single-thread block per
// subList0 entry; each accepted target appends its index and periodic shift
// to the source cell's row.  numNeighbor must have been zeroed beforehand.
__global__ void Parallel::CudaGlobal::
buildCellNeighborhood (const IntVectorType numCell,
                       const IndexType devideLevel,
                       const ScalorType rlist,
                       const HostVectorType globalBoxSize,
                       const int rankx,
                       const int ranky,
                       const int rankz,
                       const int nProcDimx,
                       const int nProcDimy,
                       const int nProcDimz,
                       const IndexType * subList0,
                       const IndexType length0,
                       const IndexType * subList1,
                       const IndexType length1,
                       IndexType * numNeighbor,
                       IndexType * neighborCellIndex,
                       CoordNoiType * neighborShiftNoi,
                       const IndexType stride)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;  // index into subList0
  IndexType tid = threadIdx.x;
  if (bid >= length0) return ;
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (numCell.x == 3) oneCellX = true;
  if (numCell.y == 3) oneCellY = true;
  if (numCell.z == 3) oneCellZ = true;
  // Bug fix: the per-rank box size is real-valued.  It was previously held
  // in a dim3, whose unsigned-int components truncated the box dimensions
  // and corrupted the distance test below (the overload at the other call
  // site uses HostVectorType here).
  HostVectorType boxSize;
  ScalorType scalorx, scalory, scalorz;
  if (tid == 0){
    boxSize.x = globalBoxSize.x / nProcDimx;
    boxSize.y = globalBoxSize.y / nProcDimy;
    boxSize.z = globalBoxSize.z / nProcDimz;
    // Cell edge lengths; in a oneCell* dimension the single real cell spans
    // the whole local box.
    oneCellX ? scalorx = boxSize.x :
        scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
    oneCellY ? scalory = boxSize.y :
        scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
    oneCellZ ? scalorz = boxSize.z :
        scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
  }
  IndexType my_cellIndex = subList0[bid];
  IntScalorType my_cellIndexx, my_cellIndexy, my_cellIndexz;
  if (tid == 0){
    Parallel::CudaDevice::D1toD3 (numCell,
                                  int(my_cellIndex),
                                  my_cellIndexx, my_cellIndexy, my_cellIndexz);
    ScalorType rlist2 = rlist * rlist;
    for (IndexType ii = 0; ii < length1; ++ii){
      IndexType target_cellIndex = subList1[ii];
      IntScalorType target_cellIndexx, target_cellIndexy, target_cellIndexz;
      Parallel::CudaDevice::D1toD3 (numCell,
                                    int(target_cellIndex),
                                    target_cellIndexx,
                                    target_cellIndexy,
                                    target_cellIndexz);
      // Quick reject: farther apart than devideLevel cells in any direction.
      if (abs(target_cellIndexx - my_cellIndexx) > devideLevel ||
          abs(target_cellIndexy - my_cellIndexy) > devideLevel ||
          abs(target_cellIndexz - my_cellIndexz) > devideLevel ){
        continue;
      }
      // Periodic image shift of the target cell when this rank touches the
      // corresponding global boundary.
      CoordNoiType myshift ;
      myshift.x = myshift.y = myshift.z = 0;
      if (rankx == 0 &&
          target_cellIndexx < devideLevel) {
        myshift.x = - 1;
      }
      else if (rankx == nProcDimx - 1 &&
               target_cellIndexx >= int(numCell.x - devideLevel)){
        myshift.x = 1;
      }
      if (ranky == 0 &&
          target_cellIndexy < devideLevel) {
        myshift.y = - 1;
      }
      else if (ranky == nProcDimy - 1 &&
               target_cellIndexy >= int(numCell.y - devideLevel)){
        myshift.y = 1;
      }
      if (rankz == 0 &&
          target_cellIndexz < devideLevel) {
        myshift.z = - 1;
      }
      else if (rankz == nProcDimz - 1 &&
               target_cellIndexz >= int(numCell.z - devideLevel)){
        myshift.z = 1;
      }
      // Closest approach of the two cells on the cell lattice.
      ScalorType min = 1e9;
      for (int dx = -1; dx <= 1; ++dx){
        for (int dy = -1; dy <= 1; ++dy){
          for (int dz = -1; dz <= 1; ++dz){
            ScalorType diffx ((-my_cellIndexx + target_cellIndexx + dx) * scalorx);
            ScalorType diffy ((-my_cellIndexy + target_cellIndexy + dy) * scalory);
            ScalorType diffz ((-my_cellIndexz + target_cellIndexz + dz) * scalorz);
            ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
            if (diff2 < min){
              min = diff2;
            }
          }
        }
      }
      if (min < rlist2){
        neighborShiftNoi [(numNeighbor[my_cellIndex] ) + my_cellIndex * stride]
            = myshift;
        neighborCellIndex[(numNeighbor[my_cellIndex]++) + my_cellIndex * stride]
            = target_cellIndex;
      }
    }
  }
}
// __global__ void Parallel::CudaGlobal::
// buildCellNeighborhood (const IntVectorType numCell,
// const IndexType devideLevel,
// const ScalorType rlist,
// const HostVectorType globalBoxSize,
// const int rankx,
// const int ranky,
// const int rankz,
// const int nProcDimx,
// const int nProcDimy,
// const int nProcDimz,
// const IndexType * subList0,
// const IndexType length0,
// const IndexType * subList1,
// const IndexType length1,
// IndexType * numNeighbor,
// IndexType * neighborCellIndex,
// const IndexType stride)
// {
// IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
// IndexType tid = threadIdx.x;
// if (bid >= length0) return ;
// int centerx, centery, centerz;
// if (tid == 0){
// HostVectorType boxSize;
// boxSize.x = globalBoxSize.x / nProcDimx;
// boxSize.y = globalBoxSize.y / nProcDimy;
// boxSize.z = globalBoxSize.z / nProcDimz;
// ScalorType rlist2 = rlist;
// numNeighbor[bid] = 0;
// Parallel::CudaDevice::D1toD3 (numCell, int(bid), centerx, centery, centerz);
// bool oneCellX(false), oneCellY(false), oneCellZ(false);
// if (numCell.x == 3) oneCellX = true;
// if (numCell.y == 3) oneCellY = true;
// if (numCell.z == 3) oneCellZ = true;
// ScalorType scalorx, scalory, scalorz;
// oneCellX ? scalorx = boxSize.x :
// scalorx = boxSize.x / ScalorType(numCell.x - (devideLevel << 1));
// oneCellY ? scalory = boxSize.y :
// scalory = boxSize.y / ScalorType(numCell.y - (devideLevel << 1));
// oneCellZ ? scalorz = boxSize.z :
// scalorz = boxSize.z / ScalorType(numCell.z - (devideLevel << 1));
// int targetx, targety, targetz;
// IndexType targetIndex;
// for (IndexType i = 0; i < length1; ++i){
// targetIndex = subList1[i];
// Parallel::CudaDevice::D1toD3 (numCell, int(targetIndex),
// targetx, targety, targetz);
// ScalorType min = 1e9;
// #pragma unroll 27
// for (int dx = -1; dx <= 1; ++dx){
// for (int dy = -1; dy <= 1; ++dy){
// for (int dz = -1; dz <= 1; ++dz){
// ScalorType diffx ((-centerx + targetx + dx) * scalorx);
// ScalorType diffy ((-centery + targety + dy) * scalory);
// ScalorType diffz ((-centerz + targetz + dz) * scalorz);
// // shortestImage (box, &diffx, &diffy, &diffz);
// ScalorType diff2 (diffx * diffx + diffy * diffy + diffz * diffz);
// if (diff2 < min){
// min = diff2;
// }
// }
// }
// }
// if (min < rlist2){
// IndexType tmp = Parallel::CudaDevice::D3toD1 (numCell,
// targetx, targety, targetz);
// neighborCellIndex[(numNeighbor[bid]++) + bid * stride] = tmp;
// }
// }
// }
// }
// Scale every stored coordinate component-wise by `scale`.
// One block per cell; threads past the cell's atom count do nothing.
__global__ void Parallel::CudaGlobal::
rescaleCoordinate (const IndexType * numAtomInCell,
                   const HostVectorType scale,
                   CoordType * coord)
{
  const IndexType cell = blockIdx.x + gridDim.x * blockIdx.y;
  if (threadIdx.x >= numAtomInCell[cell]) return;
  const IndexType atom = threadIdx.x + cell * blockDim.x;
  coord[atom].x *= scale.x;
  coord[atom].y *= scale.y;
  coord[atom].z *= scale.z;
}
// Scale every stored velocity component-wise by `scale`.
// One block per cell; threads past the cell's atom count do nothing.
__global__ void Parallel::CudaGlobal::
rescaleVelocity (const IndexType * numAtomInCell,
                 const HostVectorType scale,
                 ScalorType * velox,
                 ScalorType * veloy,
                 ScalorType * veloz)
{
  const IndexType cell = blockIdx.x + gridDim.x * blockIdx.y;
  if (threadIdx.x >= numAtomInCell[cell]) return;
  const IndexType atom = threadIdx.x + cell * blockDim.x;
  velox[atom] *= scale.x;
  veloy[atom] *= scale.y;
  veloz[atom] *= scale.z;
}
// Scale all coordinates by `scale` and update the box and local frame so
// they stay consistent with the rescaled coordinates.
void Parallel::DeviceCellListedMDData::
rescaleCoordinate (const HostVectorType & scale)
{
  HostVectorType globalBoxSize = getGlobalBoxSize();
  IntVectorType numCell = getNumCell();
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  dim3 gridDim = toGridDim (totalNumCell);
  // One block per cell rescales the coordinates of the resident atoms.
  Parallel::CudaGlobal::rescaleCoordinate
      <<<gridDim, Parallel::Interface::numThreadsInCell()>>>(
          numAtomInCell,
          scale,
          coord);
  checkCUDAError ("DeviceCellListedMDData::rescaleCoordinate");
  // The global box and the processor's spatial frame scale with the
  // coordinates.
  globalBoxSize.x *= scale.x;
  globalBoxSize.y *= scale.y;
  globalBoxSize.z *= scale.z;
  setGlobalBox (globalBoxSize.x, globalBoxSize.y, globalBoxSize.z);
  frameLow.x *= scale.x;
  frameLow.y *= scale.y;
  frameLow.z *= scale.z;
  frameUp.x *= scale.x;
  frameUp.y *= scale.y;
  frameUp.z *= scale.z;
}
// Scale all velocities component-wise by `scale`.  Unlike
// rescaleCoordinate, the box geometry is not affected.
void Parallel::DeviceCellListedMDData::
rescaleVelocity (const HostVectorType & scale)
{
  // Cleanup: the previous version also fetched getGlobalBoxSize() into an
  // unused local; only the cell count is needed for the launch.
  IntVectorType numCell = getNumCell();
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  dim3 gridDim = toGridDim (totalNumCell);
  // One block per cell rescales the velocities of the resident atoms.
  Parallel::CudaGlobal::rescaleVelocity
      <<<gridDim, Parallel::Interface::numThreadsInCell()>>>(
          numAtomInCell,
          scale,
          velox, veloy, veloz);
  checkCUDAError ("DeviceCellListedMDData::rescaleVelocity");
}
// Allocate this device object to the dimensions of `hdata` and copy the
// cell-list geometry.  Per-cell atom counters are zeroed on the device; no
// atom data is transferred here.
void Parallel::DeviceCellListedMDData::
mallocFromHost (const HostCellListedMDData & hdata)
{
  DeviceMDData::mallocFromHost (hdata);
  rlist = hdata.getRlist();
  devideLevel = hdata.getDevideLevel();
  numCell.x = hdata.getNumCell().x;
  numCell.y = hdata.getNumCell().y;
  numCell.z = hdata.getNumCell().z;
  frameLow.x = hdata.getFrameLow().x;
  frameLow.y = hdata.getFrameLow().y;
  frameLow.z = hdata.getFrameLow().z;
  frameUp.x = hdata.getFrameUp().x;
  frameUp.y = hdata.getFrameUp().y;
  frameUp.z = hdata.getFrameUp().z;
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  easyMallocCell (totalNumCell);
  // Zero all per-cell atom counters on the device.
  setValue <<<totalNumCell / DefaultNThreadPerBlock + 1, DefaultNThreadPerBlock>>>
      (numAtomInCell, totalNumCell, IndexType(0));
}
// Allocate `hdata` to this object's dimensions and copy the cell-list
// geometry.  The host per-cell atom counters are zeroed; no atom data is
// transferred here.
void Parallel::DeviceCellListedMDData::
mallocToHost (HostCellListedMDData & hdata) const
{
  DeviceMDData::mallocToHost (hdata);
  hdata.rlist = rlist;
  hdata.devideLevel = devideLevel;
  hdata.frameLow.x = frameLow.x;
  hdata.frameLow.y = frameLow.y;
  hdata.frameLow.z = frameLow.z;
  hdata.frameUp.x = frameUp.x;
  hdata.frameUp.y = frameUp.y;
  hdata.frameUp.z = frameUp.z;
  hdata.numCell.x = numCell.x;
  hdata.numCell.y = numCell.y;
  hdata.numCell.z = numCell.z;
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  hdata.easyReallocCell (totalNumCell);
  // Zero all host per-cell atom counters.
  for (IndexType i = 0; i < totalNumCell; ++i){
    hdata.numAtomInCell[i] = 0;
  }
}
// Allocate this device object to the dimensions of another device object and
// copy the cell-list geometry.  Per-cell atom counters are zeroed on the
// device; no atom data is transferred here.
void Parallel::DeviceCellListedMDData::
mallocFromDevice (const DeviceCellListedMDData & ddata)
{
  DeviceMDData::mallocFromDevice (ddata);
  rlist = ddata.getRlist();
  devideLevel = ddata.getDevideLevel();
  numCell.x = ddata.getNumCell().x;
  numCell.y = ddata.getNumCell().y;
  numCell.z = ddata.getNumCell().z;
  frameLow.x = ddata.getFrameLow().x;
  frameLow.y = ddata.getFrameLow().y;
  frameLow.z = ddata.getFrameLow().z;
  frameUp.x = ddata.getFrameUp().x;
  frameUp.y = ddata.getFrameUp().y;
  frameUp.z = ddata.getFrameUp().z;
  IndexType totalNumCell = numCell.x * numCell.y * numCell.z;
  easyMallocCell (totalNumCell);
  // Zero all per-cell atom counters on the device.
  setValue <<<totalNumCell / DefaultNThreadPerBlock + 1, DefaultNThreadPerBlock>>>
      (numAtomInCell, totalNumCell, IndexType(0));
}
|
8fbfa23afab3865c4dfada8f8d14e3ff02b22507.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "jacobi.h"
#include "alloc3d_gpu.h"
#include "transfer3d_gpu.h"
#include "alloc3d.h"
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#define BLOCK_SIZE 8
// Initialise the temperature fields and the source term on an N^3 grid.
//   u, u_old : start_T everywhere, then boundary faces are imposed
//              (the j == 0 face is held at 0, all other faces at 20).
//   f        : zero everywhere except a box-shaped heat source of 200.
void
assign_ufu_old(double*** u, double*** f, double*** u_old, int N, double start_T){
    // Bounds of the heat-source box (grid coordinates of the physical
    // region x in [-1, -3/8], y in [-1, -1/2], z in [-2/3, 0]).
    int radxi = 0,
        radxf = (5 * N)/16, // (-3/8 + 1) * N/2
        radyi = 0,
        radyf = N/4, // (_1/2 + 1) * N/2
        radzi = N/6 + (N%6 > 0), // (-2/3 + 1) * N/2 truncating upwards if there's some remainder.
        radzf = N/2; // (0 + 1) * N/2

    // Fill the whole volume with the starting temperature and a zero source.
    for (int a = 0; a < N; a++){
        for (int b = 0; b < N; b++){
            for (int c = 0; c < N; c++){
                u_old[a][b][c] = start_T;
                u[a][b][c]     = start_T;
                f[a][b][c]     = 0;
            }
        }
    }

    // Impose boundary values on all six faces.  The statement order inside
    // the loop is kept exactly, so edge and corner points resolve to the
    // same final values as before.
    for (int a = 0; a < N; a++){
        for (int b = 0; b < N; b++){
            u_old[a][0][b]   = 0.;
            u_old[a][N-1][b] = 20.;
            u[a][0][b]       = 0.;
            u[a][N-1][b]     = 20.;
            u_old[a][b][0]   = 20.;
            u_old[a][b][N-1] = 20.;
            u[a][b][0]       = 20.;
            u[a][b][N-1]     = 20.;
            u_old[0][a][b]   = 20.;
            u_old[N-1][a][b] = 20.;
            u[0][a][b]       = 20.;
            u[N-1][a][b]     = 20.;
        }
    }

    // Mark the heat-source region (inclusive bounds).
    for (int a = radxi; a <= radxf; a++){
        for (int b = radyi; b <= radyf; b++){
            for (int c = radzi; c <= radzf; c++){
                f[a][b][c] = 200;
            }
        }
    }
}
// Dump an N x N x N array to stdout, layer by layer, tab-separated.
void print_matrix(double*** A, int N){
    for (int layer = 0; layer < N; layer++){
        printf("\n %d -th Layer \n", layer);
        for (int row = 0; row < N; row++){
            for (int col = 0; col < N; col++){
                printf("%lf \t", A[layer][row][col]);
            }
            printf("\n");
        }
    }
}
// Driver: dual-GPU Jacobi iteration for a 3D Poisson problem.
// Usage: <prog> N iter_max start_T [output_type]
//   N           grid size (N x N x N)
//   iter_max    number of Jacobi sweeps
//   start_T     initial temperature of interior points
//   output_type 0: silent (default), 2: print the result matrix
int main(int argc, char *argv[]){
    int N = 5;
    int iter_max = 10;
    double start_T;
    int output_type = 0;
    double ***u_h = NULL;
    double ***f_h = NULL;
    double ***prev_u_h = NULL;
    double ***u_d = NULL;
    double ***f_d = NULL;
    double ***prev_u_d = NULL;

    /* get the parameters from the command line */
    // Bug fix: argv[1..3] were read unconditionally, crashing when the
    // program was started without arguments.
    if (argc < 4) {
        fprintf(stderr, "Usage: %s N iter_max start_T [output_type]\n",
                argv[0]);
        return 1;
    }
    N = atoi(argv[1]);        // grid size
    iter_max = atoi(argv[2]); // max. no. of iterations
    start_T = atof(argv[3]);  // start T for all inner grid points
    if (argc == 5) {
        output_type = atoi(argv[4]); // output type
    }

    // Host allocations.
    if ( (u_h = d_malloc_3d(N, N, N)) == NULL ) {
        perror("array u: allocation failed");
        exit(-1);
    }
    if ( (f_h = d_malloc_3d(N, N, N)) == NULL ) {
        perror("array f: allocation failed");
        exit(-1);
    }
    if ( (prev_u_h = d_malloc_3d(N, N, N)) == NULL ) {
        // Bug fix: message previously said "array f".
        perror("array prev_u: allocation failed");
        exit(-1);
    }

    // Device allocations (made on GPU 0; GPU 1 reaches them via peer access).
    checkCudaErrors(hipSetDevice(0));
    if ( (u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
        perror("array u: allocation failed");
        exit(-1);
    }
    if ( (f_d = d_malloc_3d_gpu(N,N,N)) == NULL ) {
        perror("array f: allocation failed");
        exit(-1);
    }
    if ( (prev_u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
        // Bug fix: message previously said "array f".
        perror("array prev_u: allocation failed");
        exit(-1);
    }

    printf("We are going to start the Jacobi-Iteration \n");
    assign_ufu_old(u_h, f_h, prev_u_h, N, start_T);

    // Timing events.  They are created while device 0 is current and
    // therefore belong to device 0.
    hipEvent_t start, stop;
    float elapsed;
    hipEventCreate(&start); hipEventCreate(&stop);

    // Copy the initialised matrices to the GPU.
    transfer_3d(u_d, u_h, N,N,N, hipMemcpyHostToDevice);
    transfer_3d(prev_u_d, prev_u_h, N,N,N, hipMemcpyHostToDevice);
    transfer_3d(f_d, f_h, N,N,N, hipMemcpyHostToDevice);

    double step_width = 2./(double)N;
    double denominator = 1.0/(double)6;
    int i;
    double ***swap;
    // 3-dimensional launch; the x extent is halved because each device works
    // on one half of the domain.
    dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid_size(((N-2)*0.5+block_size.x-1)/block_size.x, ((N-2)+block_size.y-1)/ block_size.y, ((N-2)+block_size.z-1)/block_size.z);

    // Enable peer access in both directions.
    hipSetDevice(0);
    checkCudaErrors(hipDeviceEnablePeerAccess(1,0));
    hipSetDevice(1);
    checkCudaErrors(hipDeviceEnablePeerAccess(0,0));

    // Bug fix: the events belong to device 0, but device 1 was current when
    // they were recorded; recording an event on a non-current device is an
    // error under CUDA semantics.  Make device 0 current first.
    checkCudaErrors(hipSetDevice(0));
    hipEventRecord(start,0);
    for (i=0; i< iter_max; i++){
        printf("At iteration Nr %d \n", i);
        checkCudaErrors(hipSetDevice(0));
        hipLaunchKernelGGL(( jacobi_gpu2), dim3(grid_size),dim3(block_size), 0, 0, u_d,prev_u_d,f_d,N, step_width, denominator, 0);
        hipDeviceSynchronize();
        hipSetDevice(1);
        hipLaunchKernelGGL(( jacobi_gpu2), dim3(grid_size),dim3(block_size), 0, 0, u_d,prev_u_d,f_d,N, step_width, denominator, 1);
        hipDeviceSynchronize();
        // Ping-pong the solution buffers.
        swap = u_d;
        u_d = prev_u_d;
        prev_u_d = swap;
    }
    checkCudaErrors(hipSetDevice(0)); // stop event also belongs to device 0
    hipEventRecord(stop,0);

    // Copy results back to the host.
    transfer_3d(u_h, u_d, N,N,N, hipMemcpyDeviceToHost);
    transfer_3d(prev_u_h, prev_u_d, N,N,N, hipMemcpyDeviceToHost);

    // Report the elapsed time (milliseconds).
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed, start, stop);
    hipEventDestroy(start); hipEventDestroy(stop);
    printf("%lf \n",elapsed);

    // Dump results if wanted.
    switch(output_type) {
    case 0:
        // no output at all
        break;
    case 2:
        print_matrix(prev_u_h,N);
        break;
    default:
        fprintf(stderr, "Non-supported output type!\n");
        break;
    }

    // De-allocate memory.
    free(u_h);
    free(prev_u_h);
    free(f_h);
    free_gpu(u_d);
    free_gpu(prev_u_d);
    free_gpu(f_d);
    return(0);
}
| 8fbfa23afab3865c4dfada8f8d14e3ff02b22507.cu | #include <stdio.h>
#include "jacobi.h"
#include "alloc3d_gpu.h"
#include "transfer3d_gpu.h"
#include "alloc3d.h"
#include <cuda_runtime_api.h>
#include <helper_cuda.h>
#define BLOCK_SIZE 8
// Initialize the Poisson problem on an N x N x N grid:
//  - u and u_old are filled with start_T everywhere, f is zeroed,
//  - then the six boundary faces are overwritten: the j == 0 face is held
//    at 0, the other five faces at 20,
//  - finally a box-shaped heat source (f = 200) is placed in the corner
//    region bounded by the rad** indices computed below (fractions of N/2,
//    per the inline comments).
void
assign_ufu_old(double*** u, double*** f, double*** u_old, int N, double start_T){
int radxi = 0,
radxf = (5 * N)/16, // (-3/8 + 1) * N/2
radyi = 0,
radyf = N/4, // (_1/2 + 1) * N/2
radzi = N/6 + (N%6 > 0), // (-2/3 + 1) * N/2 truncating upwards if there's some remainder.
radzf = N/2; // (0 + 1) * N/2
int i,j,k;
// interior fill: temperature start_T, zero source term
for (i=0; i<N; i++){
for (j=0;j<N;j++){
for (k=0; k<N; k++){
u_old[i][j][k]=start_T;
u[i][j][k]=start_T;
f[i][j][k]=0;
}
}
}
// boundary faces (Dirichlet): j == 0 face is 0, all other faces are 20
for(i=0;i<N;i++){
for(j=0;j<N;j++){
u_old[i][0][j] = 0.;
u_old[i][N-1][j]=20.;
u[i][0][j] = 0.;
u[i][N-1][j]=20.;
u_old[i][j][0]=20.;
u_old[i][j][N-1]=20.;
u[i][j][0]=20.;
u[i][j][N-1]=20.;
u_old[0][i][j]=20.;
u_old[N-1][i][j]=20.;
u[0][i][j]=20.;
u[N-1][i][j]=20.;
}
}
// printf("X: %d - %d. Y: %d - %d. Z: %d - %d\n", radxi, radxf, radyi, radyf, radzi, radzf);
// heat-source box: inclusive index bounds
for (i = radxi; i <= radxf; i++) {
for (j = radyi; j <= radyf; j++) {
for (k = radzi; k <= radzf; k++) {
f[i][j][k] = 200;
}
}
}
}
// Dump an N x N x N array to stdout, one z-layer at a time,
// values tab-separated, rows newline-terminated.
void print_matrix(double*** A, int N){
    int layer, row, col;
    for (layer = 0; layer < N; layer++){
        printf("\n %d -th Layer \n", layer);
        for (row = 0; row < N; row++){
            for (col = 0; col < N; col++)
                printf("%lf \t", A[layer][row][col]);
            printf("\n");
        }
    }
}
/*
 * Driver for a two-GPU Jacobi solver of the 3-D Poisson problem.
 * Usage: prog N iter_max start_T [output_type]
 *   N           grid points per dimension
 *   iter_max    number of Jacobi iterations
 *   start_T     initial temperature of the interior points
 *   output_type 0 = silent (default), 2 = print the result matrix
 * NOTE(review): argv[1..3] are read unconditionally -- fewer than 4
 * arguments dereferences past argv.
 */
int main(int argc, char *argv[]){
int N = 5;
int iter_max = 10;
double tolerance = 5; //possibly initialize to avoid segmentation error (unused below)
double start_T;
int output_type = 0;
char *output_prefix = "poisson_res"; // (unused in this build)
char *output_ext = ""; // (unused)
char output_filename[FILENAME_MAX]; // (unused)
double ***u_h = NULL;
double ***f_h = NULL;
double ***prev_u_h=NULL;
double ***u_d=NULL;
double ***f_d=NULL;
double ***prev_u_d=NULL;
/* get the paramters from the command line */
N = atoi(argv[1]); // grid size
iter_max = atoi(argv[2]); // max. no. of iterations
start_T = atof(argv[3]); // start T for all inner grid points
if (argc == 5) {
output_type = atoi(argv[4]); // ouput type
}
//ON CPU
//allocate memory
if ( (u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
//allocate f
if ( (f_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_h = d_malloc_3d(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
//ON GPU 1
checkCudaErrors(cudaSetDevice(0));
if ( (u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array u: allocation failed");
exit(-1);
}
if ( (f_d = d_malloc_3d_gpu(N,N,N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
if ( (prev_u_d = d_malloc_3d_gpu(N, N, N)) == NULL ) {
perror("array f: allocation failed");
exit(-1);
}
printf("We are going to start the Jacobi-Iteration \n");
assign_ufu_old(u_h,f_h,prev_u_h,N, start_T);
//create event in order to time
cudaEvent_t start, stop;
float elapsed;
cudaEventCreate(&start); cudaEventCreate(&stop);
//copy assigned Matrices to GPU
transfer_3d(u_d, u_h, N,N,N, cudaMemcpyHostToDevice);
transfer_3d(prev_u_d, prev_u_h, N,N,N, cudaMemcpyHostToDevice);
transfer_3d(f_d,f_h, N,N,N, cudaMemcpyHostToDevice);
//start for-loop
double step_width=2./(double)N;
double denominator = 1.0/(double)6; // Jacobi 6-point stencil weight
int i;
double ***swap;
//create 3-dimensional grid
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
// x extent is halved: each of the two GPUs updates one half of the domain
// (the last kernel argument selects the half)
dim3 grid_size(((N-2)*0.5+block_size.x-1)/block_size.x, ((N-2)+block_size.y-1)/ block_size.y, ((N-2)+block_size.z-1)/block_size.z);
//enable peer access between devices so device 1 can read device-0 buffers
cudaSetDevice(0);
checkCudaErrors(cudaDeviceEnablePeerAccess(1,0));
cudaSetDevice(1);
checkCudaErrors(cudaDeviceEnablePeerAccess(0,0));
//start iteration
cudaEventRecord(start,0);
for (i=0; i< iter_max; i++){
printf("At iteration Nr %d \n", i);
checkCudaErrors(cudaSetDevice(0));
jacobi_gpu2<<<grid_size,block_size>>>(u_d,prev_u_d,f_d,N, step_width, denominator, 0);
cudaDeviceSynchronize();
cudaSetDevice(1);
jacobi_gpu2<<<grid_size,block_size>>>(u_d,prev_u_d,f_d,N, step_width, denominator, 1);
cudaDeviceSynchronize();
// host-side pointer swap: new solution becomes old for the next sweep
swap = u_d;
u_d = prev_u_d;
prev_u_d = swap;
}
cudaEventRecord(stop,0);
//copy matrices back to CPU
cudaSetDevice(0);
transfer_3d(u_h, u_d, N,N,N, cudaMemcpyDeviceToHost);
transfer_3d(prev_u_h, prev_u_d, N,N,N, cudaMemcpyDeviceToHost);
//print_matrix(u_h,N);
//print_matrix(prev_u_h,N);
//stop and destroy events; elapsed is in milliseconds
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start); cudaEventDestroy(stop);
printf("%lf \n",elapsed);
// dump results if wanted
switch(output_type) {
case 0:
// no output at all
break;
case 2:
print_matrix(prev_u_h,N);
break;
default:
fprintf(stderr, "Non-supported output type!\n");
break;
}
// de-allocate memory
free(u_h);
free(prev_u_h);
free(f_h);
free_gpu(u_d);
free_gpu(prev_u_d);
free_gpu(f_d);
return(0);
}
|
b770f67da0454adf5201cef16aee7fbc628cf8b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "timer.h"
#include <iostream>
#include <time.h>
// https://siboehm.com/articles/22/CUDA-MMM
#define CEIL_DIV(x, y) (x + y - 1) / y
__global__ void empty() {}
// Reference CPU GEMM: C = A * B with A (M x K), B (K x N), C (M x N),
// all row-major. O(M*N*K); used to validate the GPU result.
void matmul(const float *A, const float *B, float *C, int M, int K, int N) {
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < K; ++k)
                acc += A[row * K + k] * B[k * N + col];
            C[row * N + col] = acc;
        }
    }
}
// Naive GEMM kernel: C[M x N] = A[M x K] * B[K x N], row-major.
// One thread per output element; grid x covers rows (M), grid y covers
// columns (N).
__global__ void matmul_kernel(const float *A, const float *B, float *C, int M,
int K, int N) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x; // row index, [0, M)
    int ty = blockIdx.y * blockDim.y + threadIdx.y; // column index, [0, N)
    // Bounds guard: the grid is rounded up with CEIL_DIV, so edge threads
    // must bail out. BUG FIX: the original tested `tx < M && tx < N`, which
    // never bounded ty (out-of-range column writes when the grid over-covers
    // N) and skipped every row with tx >= N (half of C unwritten for
    // M=256, N=128).
    if (tx < M && ty < N) {
        float sum = 0.0f;
        for (int k = 0; k < K; ++k) {
            sum += A[tx * K + k] * B[k * N + ty];
        }
        C[tx * N + ty] = sum;
    }
}
// Report whether any pair of corresponding elements of A and B differs by
// more than float machine epsilon. Returns true at the first mismatch.
bool compare_diff(const float *A, const float *B, int n) {
    const float tol = std::numeric_limits<float>::epsilon();
    for (int idx = 0; idx != n; ++idx) {
        const float delta = std::abs(A[idx] - B[idx]);
        if (delta > tol) {
            return true;
        }
    }
    return false;
}
// Benchmark: CPU matmul vs. the naive GPU kernel on fixed sizes
// M=256, K=64, N=128, with randomized inputs.
// NOTE(review): the GPU timing window includes both H2D and D2H copies, and
// no warm-up launch precedes it, so first-launch overhead is counted too.
// NOTE(review): heap and device allocations are never freed (reclaimed at
// process exit).
int main() {
srand(time(0));
int device_id = 0;
hipSetDevice(device_id);
hipStream_t stream;
hipStreamCreate(&stream);
int M = 256, K = 64, N = 128;
float *A = new float[M * K];
float *B = new float[K * N];
float *C = new float[M * N];
float *gpu_C = new float[M * N];
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, M * K * sizeof(float));
hipMalloc(&d_B, K * N * sizeof(float));
hipMalloc(&d_C, M * N * sizeof(float));
// generate data
for (int i = 0; i < M * K; ++i) {
A[i] = rand() % 100 * 2.2;
}
for (int i = 0; i < K * N; ++i) {
B[i] = rand() % 100 * 2.2;
}
CPUTimer cpu_timer;
cpu_timer.Reset();
matmul(A, B, C, M, K, N);
float cpu_cost = cpu_timer.ElapsedTime();
std::cout << "cpu time cost: " << cpu_cost << " ms." << std::endl;
// launch configuration (original label said "warmup", but no warm-up
// launch is actually performed)
dim3 blockDim(32, 32, 1);
dim3 gridDim(CEIL_DIV(M, 32), CEIL_DIV(N, 32), 1);
GPUTimer gpu_timer;
gpu_timer.Start(stream);
hipMemcpy(d_A, A, M * K * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, K * N * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matmul_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, d_C, M, K, N);
hipMemcpy(gpu_C, d_C, M * N * sizeof(float), hipMemcpyDeviceToHost);
gpu_timer.Stop(stream);
float gpu_cost = gpu_timer.ElapsedTime();
std::cout << "gpu time cost: " << gpu_cost << " ms." << std::endl;
bool has_diff = compare_diff(C, gpu_C, M * N);
std::cout << "diff: " << has_diff << std::endl;
// std::cout << "launch latency: " << elapse_time / (1.0f * repeat) << " ms."
// << std::endl;
return 0;
} | b770f67da0454adf5201cef16aee7fbc628cf8b1.cu | #include "cuda_runtime.h"
#include "timer.h"
#include <iostream>
#include <time.h>
// https://siboehm.com/articles/22/CUDA-MMM
#define CEIL_DIV(x, y) (x + y - 1) / y
__global__ void empty() {}
// CPU reference implementation of C = A * B (row-major),
// A is M x K, B is K x N, C is M x N. Cubic time; correctness baseline
// for the GPU kernel.
void matmul(const float *A, const float *B, float *C, int M, int K, int N) {
    int i, j, k;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            float total = 0.0f;
            for (k = 0; k < K; k++) {
                total += A[i * K + k] * B[k * N + j];
            }
            C[i * N + j] = total;
        }
    }
}
// Naive GEMM kernel: C[M x N] = A[M x K] * B[K x N], row-major.
// One thread per output element; grid x covers rows (M), grid y covers
// columns (N).
__global__ void matmul_kernel(const float *A, const float *B, float *C, int M,
int K, int N) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x; // row index, [0, M)
    int ty = blockIdx.y * blockDim.y + threadIdx.y; // column index, [0, N)
    // Bounds guard: the grid is rounded up with CEIL_DIV, so edge threads
    // must bail out. BUG FIX: the original tested `tx < M && tx < N`, which
    // never bounded ty (out-of-range column writes when the grid over-covers
    // N) and skipped every row with tx >= N (half of C unwritten for
    // M=256, N=128).
    if (tx < M && ty < N) {
        float sum = 0.0f;
        for (int k = 0; k < K; ++k) {
            sum += A[tx * K + k] * B[k * N + ty];
        }
        C[tx * N + ty] = sum;
    }
}
// True when at least one element pair of A/B differs by more than float
// machine epsilon; false when all n pairs agree within epsilon.
bool compare_diff(const float *A, const float *B, int n) {
    const float eps = std::numeric_limits<float>::epsilon();
    for (int i = 0; i < n; i++) {
        if (!(std::abs(A[i] - B[i]) <= eps)) {
            return true;
        }
    }
    return false;
}
// Benchmark: CPU matmul vs. the naive GPU kernel on fixed sizes
// M=256, K=64, N=128, with randomized inputs.
// NOTE(review): the GPU timing window includes both H2D and D2H copies, and
// no warm-up launch precedes it, so first-launch overhead is counted too.
// NOTE(review): heap and device allocations are never freed (reclaimed at
// process exit).
int main() {
srand(time(0));
int device_id = 0;
cudaSetDevice(device_id);
cudaStream_t stream;
cudaStreamCreate(&stream);
int M = 256, K = 64, N = 128;
float *A = new float[M * K];
float *B = new float[K * N];
float *C = new float[M * N];
float *gpu_C = new float[M * N];
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, M * K * sizeof(float));
cudaMalloc(&d_B, K * N * sizeof(float));
cudaMalloc(&d_C, M * N * sizeof(float));
// generate data
for (int i = 0; i < M * K; ++i) {
A[i] = rand() % 100 * 2.2;
}
for (int i = 0; i < K * N; ++i) {
B[i] = rand() % 100 * 2.2;
}
CPUTimer cpu_timer;
cpu_timer.Reset();
matmul(A, B, C, M, K, N);
float cpu_cost = cpu_timer.ElapsedTime();
std::cout << "cpu time cost: " << cpu_cost << " ms." << std::endl;
// launch configuration (original label said "warmup", but no warm-up
// launch is actually performed)
dim3 blockDim(32, 32, 1);
dim3 gridDim(CEIL_DIV(M, 32), CEIL_DIV(N, 32), 1);
GPUTimer gpu_timer;
gpu_timer.Start(stream);
cudaMemcpy(d_A, A, M * K * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, K * N * sizeof(float), cudaMemcpyHostToDevice);
matmul_kernel<<<gridDim, blockDim>>>(d_A, d_B, d_C, M, K, N);
cudaMemcpy(gpu_C, d_C, M * N * sizeof(float), cudaMemcpyDeviceToHost);
gpu_timer.Stop(stream);
float gpu_cost = gpu_timer.ElapsedTime();
std::cout << "gpu time cost: " << gpu_cost << " ms." << std::endl;
bool has_diff = compare_diff(C, gpu_C, M * N);
std::cout << "diff: " << has_diff << std::endl;
// std::cout << "launch latency: " << elapse_time / (1.0f * repeat) << " ms."
// << std::endl;
return 0;
} |
90d3769a38e7eb831615fd44ae19e836814ee706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the extension scheme
******************************************************************************/
// Pair of one-sided ENO derivatives at a grid node:
// sR = right (forward) derivative, sL = left (backward) derivative.
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
// Return the larger of the two arguments.
__device__ inline
double max2(double x, double y)
{
    if (x < y) {
        return y;
    }
    return x;
}
// Return the smaller of the two arguments.
__device__ inline
double min2(double x, double y)
{
    if (x < y) {
        return x;
    }
    return y;
}
// Minmod limiter: zero when the arguments have opposite signs, otherwise
// the argument of smaller magnitude (y when the magnitudes tie).
__device__ inline
double min_mod(double x, double y)
{
    if (x * y < 0) {
        return 0.0;
    }
    if (fabs(x) < fabs(y)) {
        return x;
    }
    return y;
}
// Sign of x: +1.0 when x > 0, otherwise -1.0 (note: zero maps to -1.0).
__device__ inline
double sign(double x)
{
    if (x > 0) {
        return 1.0;
    }
    return -1.0;
}
// Convert (row, col, page) subscripts into the linear index of a
// rows x cols x pges array laid out page-major (page, then column, then row).
// Out-of-range subscripts are clamped to the nearest valid index.
// NOTE(review): the original comment claimed "periodic boundary conditions",
// but the min2/max2 clamping below replicates the edge values instead.
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
// clamp each subscript into [0, dim-1]; min2/max2 operate on doubles and
// the result is truncated back to int
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
 * Second-order ENO one-sided derivatives of a scalar field at node v0.
 * Stencil (spacing ds): [v4, v1, v0, v2, v3] = values at offsets
 * -2, -1, 0, +1, +2. Each first-order difference is corrected by the
 * minmod-limited second difference. No boundary correction is applied.
 ******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative_field( double v4, double v1, double v0, double v2, double v3, double ds)
{
double p2m;
double_eno_derivative eno_d;
// central and right-shifted second differences
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
eno_d.sR = (v2 - v0) / ds - ds * p2m;
// left-shifted second difference
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
eno_d.sL = (v0 - v1) / ds + ds * p2m;
return eno_d;
}
/*******************************************************************************
 * ENO derivative of the level set function at one node along one direction,
 * chosen upwind. pr/pl are the distances to the right/left neighbor or to
 * the interface: pr == ds means the right neighbor value v2 is used,
 * otherwise the interface value 0 is substituted (likewise pl/v1).
 * Returns the one-sided derivative from the side whose neighbor value has
 * the smaller magnitude.
 * NOTE(review): divides by pr and pl -- assumes both are nonzero; confirm
 * callers never pass a zero distance.
 *******************************************************************************/
__device__ inline
double upwind_normal_point( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
double sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
double sL = (v0 - vl) / pl + pl * p2m;
return (fabs(vr) < fabs(vl)) ? sR : sL;
}
// Compute the upwind normal components (nx, ny, nz) of the level set
// function lsf at every node. One thread per node over the full
// rows x cols x pges domain; sub2ind clamps out-of-range neighbor
// subscripts to the boundary. xpr/xpl, ypf/ypb, zpu/zpd hold the
// right/left (forward/backward, up/down) distances passed on to
// upwind_normal_point. num_ele is unused.
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// x direction: column neighbors, spacing dx
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
nx[ind] = upwind_normal_point( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
// y direction: row neighbors, spacing dy
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
ny[ind] = upwind_normal_point( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
// z direction: page neighbors, spacing dz
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
nz[ind] = upwind_normal_point( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
}
// One upwind advection step for the extension equation:
//   step = v . grad(lsf)
// where lsf here is the scalar field being extended (not the level set
// function). For each direction the one-sided ENO derivative is selected
// by the sign of the velocity component (vx > 0 uses the left derivative,
// vx < 0 the right, via the min2/max2 weighting below). Nodes flagged in
// `boundary` get step = 0 so their values stay fixed. num_ele is unused.
__global__
void extend_step(double * step, double const * lsf, bool const * boundary, double const * vx, double const * vy, double const * vz, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// x direction ENO derivatives (column neighbors)
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative_field( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], dx);
double xR = eno_dx.sR;
double xL = eno_dx.sL;
// y direction ENO derivatives (row neighbors)
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative_field( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], dy);
double yF = eno_dy.sR;
double yB = eno_dy.sL;
// z direction ENO derivatives (page neighbors)
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative_field( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], dz);
double zU = eno_dz.sR;
double zD = eno_dz.sL;
// upwind combination: negative velocity takes the right-sided derivative,
// positive velocity the left-sided one
step[ind] = min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ;
// keep boundary values fixed
step[ind] = boundary[ind] ? 0.0 : step[ind];
}
| 90d3769a38e7eb831615fd44ae19e836814ee706.cu | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the extension scheme
******************************************************************************/
// Pair of one-sided ENO derivatives at a grid node:
// sR = right (forward) derivative, sL = left (backward) derivative.
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
// Return the larger of the two arguments.
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
// Return the smaller of the two arguments.
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
// Minmod limiter: zero when the arguments have opposite signs, otherwise
// the argument of smaller magnitude (y when the magnitudes tie).
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
// Sign of x: +1.0 when x > 0, otherwise -1.0 (note: zero maps to -1.0).
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
// Convert (row, col, page) subscripts into the linear index of a
// rows x cols x pges array laid out page-major (page, then column, then row).
// Out-of-range subscripts are clamped to the nearest valid index.
// NOTE(review): the original comment claimed "periodic boundary conditions",
// but the min2/max2 clamping below replicates the edge values instead.
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
// clamp each subscript into [0, dim-1]; min2/max2 operate on doubles and
// the result is truncated back to int
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
 * Second-order ENO one-sided derivatives of a scalar field at node v0.
 * Stencil (spacing ds): [v4, v1, v0, v2, v3] = values at offsets
 * -2, -1, 0, +1, +2. Each first-order difference is corrected by the
 * minmod-limited second difference. No boundary correction is applied.
 ******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative_field( double v4, double v1, double v0, double v2, double v3, double ds)
{
double p2m;
double_eno_derivative eno_d;
// central and right-shifted second differences
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
eno_d.sR = (v2 - v0) / ds - ds * p2m;
// left-shifted second difference
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
eno_d.sL = (v0 - v1) / ds + ds * p2m;
return eno_d;
}
/*******************************************************************************
 * ENO derivative of the level set function at one node along one direction,
 * chosen upwind. pr/pl are the distances to the right/left neighbor or to
 * the interface: pr == ds means the right neighbor value v2 is used,
 * otherwise the interface value 0 is substituted (likewise pl/v1).
 * Returns the one-sided derivative from the side whose neighbor value has
 * the smaller magnitude.
 * NOTE(review): divides by pr and pl -- assumes both are nonzero; confirm
 * callers never pass a zero distance.
 *******************************************************************************/
__device__ inline
double upwind_normal_point( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
double sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
double sL = (v0 - vl) / pl + pl * p2m;
return (fabs(vr) < fabs(vl)) ? sR : sL;
}
// Compute the upwind normal components (nx, ny, nz) of the level set
// function lsf at every node. One thread per node over the full
// rows x cols x pges domain; sub2ind clamps out-of-range neighbor
// subscripts to the boundary. xpr/xpl, ypf/ypb, zpu/zpd hold the
// right/left (forward/backward, up/down) distances passed on to
// upwind_normal_point. num_ele is unused.
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// x direction: column neighbors, spacing dx
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
nx[ind] = upwind_normal_point( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
// y direction: row neighbors, spacing dy
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
ny[ind] = upwind_normal_point( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
// z direction: page neighbors, spacing dz
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
nz[ind] = upwind_normal_point( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
}
// One upwind advection step for the extension equation:
//   step = v . grad(lsf)
// where lsf here is the scalar field being extended (not the level set
// function). For each direction the one-sided ENO derivative is selected
// by the sign of the velocity component (vx > 0 uses the left derivative,
// vx < 0 the right, via the min2/max2 weighting below). Nodes flagged in
// `boundary` get step = 0 so their values stay fixed. num_ele is unused.
__global__
void extend_step(double * step, double const * lsf, bool const * boundary, double const * vx, double const * vy, double const * vz, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// x direction ENO derivatives (column neighbors)
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative_field( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], dx);
double xR = eno_dx.sR;
double xL = eno_dx.sL;
// y direction ENO derivatives (row neighbors)
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative_field( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], dy);
double yF = eno_dy.sR;
double yB = eno_dy.sL;
// z direction ENO derivatives (page neighbors)
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative_field( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], dz);
double zU = eno_dz.sR;
double zD = eno_dz.sL;
// upwind combination: negative velocity takes the right-sided derivative,
// positive velocity the left-sided one
step[ind] = min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ;
// keep boundary values fixed
step[ind] = boundary[ind] ? 0.0 : step[ind];
}
|
d0d92a0f4a94256bc9c73d026eb1b3a27edf1fd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Shared-memory convolution over a grayscale image: each thread loads four
// halo-padded samples into the tile, then computes a (Mask_size x Mask_size)
// weighted sum using the outer product Global_Mask[n+i] * Global_Mask[n+j]
// of the 1-D constant mask (separable filter).
// NOTE(review): assumes blockDim == (BLOCKSIZE, BLOCKSIZE) and that the
// 4-quadrant load covers the padded tile, which requires
// blockDim >= Mask_size -- confirm launch configuration.
// NOTE(review): there is no guard that (x0, y0) lies inside the image, and
// the int 'sum' is stored into an unsigned char without clamping -- confirm
// the grid exactly covers the image and the mask is normalized.
__global__ void sobelFilterShared(unsigned char *data, unsigned char *result, int width, int height){
// Data cache: threadIdx.x , threadIdx.y
const int n = Mask_size / 2;
__shared__ int s_data[BLOCKSIZE + Mask_size * 2 ][BLOCKSIZE + Mask_size * 2];
// global mem address of the current thread in the whole grid
const int pos = threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * width + blockIdx.y * blockDim.y * width;
// load cache (32x32 shared memory, 16x16 threads blocks)
// each threads loads four values from global memory into shared mem
// if in image area, get value in global mem, else 0
int x, y; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// case1: upper left
x = x0 - n;
y = y0 - n;
if ( x < 0 || y < 0 )
s_data[threadIdx.y][threadIdx.x] = 0;
else
s_data[threadIdx.y][threadIdx.x] = *(data + pos - n - (width * n));
// case2: upper right
x = x0 + n;
y = y0 - n;
if ( x > (width - 1) || y < 0 )
s_data[threadIdx.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y][threadIdx.x + blockDim.x] = *(data + pos + n - (width * n));
// case3: lower left
x = x0 - n;
y = y0 + n;
if (x < 0 || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x] = *(data + pos - n + (width * n));
// case4: lower right
x = x0 + n;
y = y0 + n;
if ( x > (width - 1) || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = *(data + pos + n + (width * n));
// barrier: the whole padded tile must be loaded before any thread reads it
__syncthreads();
// convolution
int sum = 0;
x = n + threadIdx.x;
y = n + threadIdx.y;
for (int i = - n; i <= n; i++)
for (int j = - n; j <= n; j++)
sum += s_data[y + i][x + j] * Global_Mask[n + i] * Global_Mask[n + j];
result[pos] = sum;
} | d0d92a0f4a94256bc9c73d026eb1b3a27edf1fd6.cu | #include "includes.h"
// Shared-memory convolution over a grayscale image: each thread loads four
// halo-padded samples into the tile, then computes a (Mask_size x Mask_size)
// weighted sum using the outer product Global_Mask[n+i] * Global_Mask[n+j]
// of the 1-D constant mask (separable filter).
// NOTE(review): assumes blockDim == (BLOCKSIZE, BLOCKSIZE) and that the
// 4-quadrant load covers the padded tile, which requires
// blockDim >= Mask_size -- confirm launch configuration.
// NOTE(review): there is no guard that (x0, y0) lies inside the image, and
// the int 'sum' is stored into an unsigned char without clamping -- confirm
// the grid exactly covers the image and the mask is normalized.
__global__ void sobelFilterShared(unsigned char *data, unsigned char *result, int width, int height){
// Data cache: threadIdx.x , threadIdx.y
const int n = Mask_size / 2;
__shared__ int s_data[BLOCKSIZE + Mask_size * 2 ][BLOCKSIZE + Mask_size * 2];
// global mem address of the current thread in the whole grid
const int pos = threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * width + blockIdx.y * blockDim.y * width;
// load cache (32x32 shared memory, 16x16 threads blocks)
// each threads loads four values from global memory into shared mem
// if in image area, get value in global mem, else 0
int x, y; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// case1: upper left
x = x0 - n;
y = y0 - n;
if ( x < 0 || y < 0 )
s_data[threadIdx.y][threadIdx.x] = 0;
else
s_data[threadIdx.y][threadIdx.x] = *(data + pos - n - (width * n));
// case2: upper right
x = x0 + n;
y = y0 - n;
if ( x > (width - 1) || y < 0 )
s_data[threadIdx.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y][threadIdx.x + blockDim.x] = *(data + pos + n - (width * n));
// case3: lower left
x = x0 - n;
y = y0 + n;
if (x < 0 || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x] = *(data + pos - n + (width * n));
// case4: lower right
x = x0 + n;
y = y0 + n;
if ( x > (width - 1) || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = *(data + pos + n + (width * n));
// barrier: the whole padded tile must be loaded before any thread reads it
__syncthreads();
// convolution
int sum = 0;
x = n + threadIdx.x;
y = n + threadIdx.y;
for (int i = - n; i <= n; i++)
for (int j = - n; j <= n; j++)
sum += s_data[y + i][x + j] * Global_Mask[n + i] * Global_Mask[n + j];
result[pos] = sum;
} |
60ff10b153146ad160cb41f2d8b6cb35583279a4.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <rocblas.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <ops/declarable/helpers/batched_gemm.h>
#include <ops/specials_cuda.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <indexing/NDIndexUtils.h>
#include <ops/declarable/CustomOperations.h>
namespace sd {
namespace ops {
namespace helpers {
// Slice 3-D inputs along the leading (batch) dimension into per-matrix
// views and delegate to the vector-based bgemm overload below. `all` is an
// "all elements" index descriptor; when the caller passes nullptr a local
// one is created.
// NOTE(review): only sizeAt(0)/2 batches are sliced ("queries and keys" per
// the original comment) -- confirm callers pack two logical batches along
// dimension 0.
static void bgemm(sd::NDArray *a, sd::NDArray *b, sd::NDArray *c, NDArray *alphas, NDArray *betas,
                  int transA, int transB, int M, int N, int K, const int lda, const int ldb, const int ldc,
                  sd::NDArray *all) {
  // BUG FIX: the original constructed the fallback "all" index inside an
  // else-block and kept its address after the block ended, so every later
  // use of `all` dereferenced a dangling pointer when the caller passed
  // nullptr. Keep the fallback alive for the whole function instead.
  sd::NDArray allLocal = NDIndexUtils::createAll();
  if (all == nullptr) all = &allLocal;

  int batchSize = a->sizeAt(0) / 2;
  std::vector<NDArray *> inputs;
  std::vector<NDArray *> keyInputs;
  std::vector<NDArray *> outputs;
  sd::ops::create_view createView;
  // alpha and beta lead the first input list; the overload broadcasts them
  inputs.push_back(alphas);
  inputs.push_back(betas);
  // one 2-D view per batch entry for a (queries), b (keys) and c (output)
  for (int i = 0; i < batchSize; i++) {
    auto point = NDIndexUtils::createPoint(i);
    auto aSlice = createView.evaluate({a, &point, all, all}, {}, {});
    auto bSlice = createView.evaluate({b, &point, all, all}, {}, {});
    auto outSlice = createView.evaluate({c, &point, all, all}, {}, {});
    inputs.push_back(aSlice.at(0));
    keyInputs.push_back(bSlice.at(0));
    outputs.push_back(outSlice.at(0));
  }
  bgemm(inputs, keyInputs, outputs, alphas, betas, transA, transB, M, N, K, lda, ldb, ldc);
}
//////////////////////////////////////////////////////////////////////////////
// bsxMXK x bSxKxN = bSxMxN
// Batched GEMM dispatched through hipBLAS. Non-contiguous inputs (ews != 1)
// are first duplicated into dense column-major ('f') copies, and row-major
// outputs are handled by computing the transposed product C^T = B^T * A^T.
// Only element 0 of alphas/betas is read (scalar gemm coefficients).
void bgemm(const std::vector<NDArray*>& vA, const std::vector<NDArray*>& vB, std::vector<NDArray*>& vC,
const NDArray* alphas, const NDArray* betas, int transA, int transB, int M, int N, int K, const int lda,
const int ldb, const int ldc) {
const auto bS = vA.size(); // batch size
std::vector<NDArray*> pA(bS), pB(bS), pC(bS);
// Owned temporaries created below; freed in reverse order at the end.
std::vector<NDArray*> toDelete;
for (int i = 0; i < bS; ++i) {
// BLAS needs dense buffers, so copy any strided array to an 'f'-ordered dup.
if (vA[i]->ews() != 1) {
pA[i] = new NDArray(vA[i]->dup('f'));
toDelete.emplace_back(pA[i]);
} else
pA[i] = vA[i];
if (vB[i]->ews() != 1) {
pB[i] = new NDArray(vB[i]->dup('f'));
toDelete.emplace_back(pB[i]);
} else
pB[i] = vB[i];
if (vC[i]->ews() != 1) {
pC[i] = new NDArray(vC[i]->dup('f'));
toDelete.emplace_back(pC[i]);
} else
pC[i] = vC[i];
// BLAS produces column-major output; for a non-'f' C compute the transposed
// product instead: (A*B)^T = B^T * A^T, i.e. swap and permute the operands.
// NOTE(review): M/N/K are reassigned inside this per-batch loop — this
// assumes every batch member shares the same shape/ordering; verify callers.
if (pC[i]->ordering() != 'f') {
auto temp = pA[i];
pA[i] = new NDArray(pB[i]->permute({1, 0}));
pB[i] = new NDArray(temp->permute({1, 0}));
pC[i] = new NDArray(pC[i]->permute({1, 0}));
toDelete.push_back(pA[i]);
toDelete.push_back(pB[i]);
toDelete.push_back(pC[i]);
M = pA[i]->sizeAt(0);
K = pA[i]->sizeAt(1);
N = pB[i]->sizeAt(1);
}
NDArray::prepareSpecialUse({pC[i]}, {pA[i], pB[i]});
NDArray::registerSpecialUse({pC[i]}, {pA[i], pB[i]});
}
NDArray::prepareSpecialUse({}, {alphas, betas});
NDArray::registerSpecialUse({}, {alphas, betas});
// Collect per-batch device pointers and mirror the pointer arrays on device.
std::vector<void*> pAbuffs(bS), pBbuffs(bS), pCbuffs(bS);
for (int i = 0; i < bS; ++i) {
pAbuffs[i] = pA[i]->specialBuffer();
pBbuffs[i] = pB[i]->specialBuffer();
pCbuffs[i] = pC[i]->specialBuffer();
}
sd::LaunchContext* context = vA[0]->getContext();
PointersManager manager(context, "helpers::bgemm cuda");
const void** aBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pAbuffs.data(), bS * sizeof(void*)));
const void** bBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pBbuffs.data(), bS * sizeof(void*)));
void** cBuffers = reinterpret_cast<void**>(manager.replicatePointer(pCbuffs.data(), bS * sizeof(void*)));
// 112 encodes "transpose" in the caller's convention.
const hipblasOperation_t transAblas = transA == 112 ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB == 112 ? HIPBLAS_OP_T : HIPBLAS_OP_N;
if(M < 0) throw std::runtime_error("M < 0");
if(N < 0) throw std::runtime_error("N < 0");
if(K < 0) throw std::runtime_error("K < 0");
const auto aType = pA[0]->dataType();
const auto bType = pB[0]->dataType();
const auto cType = pC[0]->dataType();
// The BLAS handle is shared per device: hold the device mutex while binding
// the stream and issuing the batched call.
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<hipblasHandle_t*>(context->getCublasHandle());
auto stream = context->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda set stream failed ! Please double check the passed in handle.", status);
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
// choose appropriate cuda gemm api depending on data types
if (ABC && aType == DataType::DOUBLE) {
double alpha = alphas->e<double>(0);
double beta = betas->e<double>(0);
status = hipblasDgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const double**)aBuffers, lda,
(const double**)bBuffers, ldb, &beta, (double**)cBuffers, ldc, bS);
} else if (ABC && aType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = hipblasSgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const float**)aBuffers, lda,
(const float**)bBuffers, ldb, &beta, (float**)cBuffers, ldc, bS);
} else if (ABC && aType == DataType::HALF) {
__half alpha = alphas->e<float>(0);
__half beta = betas->e<float>(0);
status = hipblasHgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const __half**)aBuffers, lda,
(const __half**)bBuffers, ldb, &beta, (__half**)cBuffers, ldc, bS);
} else if (AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
// Mixed-precision path: int8 inputs accumulated into float32 output.
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, HIP_R_8I, lda, bBuffers,
HIP_R_8I, ldb, &beta, cBuffers, HIP_R_32F, ldc, bS, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
} else if (AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
// Mixed-precision path: half inputs accumulated into float32 output.
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status =
hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, HIP_R_16F, lda, bBuffers,
HIP_R_16F, ldb, &beta, cBuffers, HIP_R_32F, ldc, bS, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
} else
throw std::runtime_error("batched gemm cuda: this mode is not implemented yet !");
if (status != HIPBLAS_STATUS_SUCCESS) {
sd_printf("Status was: %d\n",status);
throw cuda_exception::build("MmulHelper::mmulMxM cuda execution failed !", status);
}
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) {
throw cuda_exception::build("MmulHelper::mmulMxM cuda stream synchronize failed !", cudaResult);
}
// Copy results back for callers whose C buffers were replaced by dense copies.
for (int i = 0; i < bS; ++i)
if (vC[i]->ews() != 1) vC[i]->assign(pC[i]);
// Delete temporaries in reverse creation order (permuted views before sources).
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 60ff10b153146ad160cb41f2d8b6cb35583279a4.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <cublas_v2.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <ops/declarable/helpers/batched_gemm.h>
#include <ops/specials_cuda.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <indexing/NDIndexUtils.h>
#include <ops/declarable/CustomOperations.h>
namespace sd {
namespace ops {
namespace helpers {
// Batched-GEMM convenience wrapper: slices the leading (batch) dimension of
// a/b/c into per-batch 2-D views via create_view and forwards them, together
// with the scalar alpha/beta arrays, to the vector-based bgemm overload.
// 'all' is an optional pre-built "take everything" index descriptor; when it
// is absent a local one is created and kept alive for the whole call.
static void bgemm(sd::NDArray *a, sd::NDArray *b, sd::NDArray *c, NDArray *alphas, NDArray *betas,
                  int transA, int transB, int M, int N, int K, const int lda, const int ldb, const int ldc,
                  sd::NDArray *all) {
  // Keep the fallback "all" descriptor alive at function scope: the previous
  // version constructed it inside an else-block, so 'all' dangled as soon as
  // that block closed.
  sd::NDArray allLocal = NDIndexUtils::createAll();
  if (all == nullptr) {
    all = &allLocal;
  }
  // divide by 2: queries and keys
  int batchSize = a->sizeAt(0) / 2;
  std::vector<NDArray *> inputs;
  std::vector<NDArray *> keyInputs;
  std::vector<NDArray *> outputs;
  sd::ops::create_view createView;
  for (int i = 0; i < batchSize; i++) {
    auto point = NDIndexUtils::createPoint(i);
    auto aSlice = createView.evaluate({a, &point, all, all}, {}, {});
    auto bSlice = createView.evaluate({b, &point, all, all}, {}, {});
    auto outSlice = createView.evaluate({c, &point, all, all}, {}, {});
    inputs.push_back(aSlice.at(0));
    keyInputs.push_back(bSlice.at(0));
    outputs.push_back(outSlice.at(0));
  }
  // alphas/betas are passed separately below. Pushing them into 'inputs' (as
  // the previous version did) made inputs.size() == batchSize + 2 while
  // keyInputs/outputs held batchSize entries; the callee uses vA.size() as
  // the batch count and would index vB/vC out of range.
  bgemm(inputs, keyInputs, outputs, alphas, betas, transA, transB, M, N, K, lda, ldb, ldc);
}
//////////////////////////////////////////////////////////////////////////////
// bsxMXK x bSxKxN = bSxMxN
// Batched GEMM on GPU via cuBLAS. Non-contiguous inputs (ews != 1) are first
// duplicated into dense column-major ('f') copies; a row-major output C is
// handled by computing the transposed product C^T = B^T * A^T instead.
// Only element 0 of alphas/betas is read (scalar gemm coefficients).
void bgemm(const std::vector<NDArray*>& vA, const std::vector<NDArray*>& vB, std::vector<NDArray*>& vC,
           const NDArray* alphas, const NDArray* betas, int transA, int transB, int M, int N, int K, const int lda,
           const int ldb, const int ldc) {
  const auto bS = vA.size();  // batch size
  std::vector<NDArray*> pA(bS), pB(bS), pC(bS);
  std::vector<NDArray*> toDelete;  // owned temporaries, released before returning
  // Use size_t counters: bS is std::vector::size_type, so 'int i < bS'
  // triggered signed/unsigned comparisons.
  for (size_t i = 0; i < bS; ++i) {
    // cuBLAS needs dense buffers, so copy any strided array to an 'f' dup.
    if (vA[i]->ews() != 1) {
      pA[i] = new NDArray(vA[i]->dup('f'));
      toDelete.emplace_back(pA[i]);
    } else
      pA[i] = vA[i];
    if (vB[i]->ews() != 1) {
      pB[i] = new NDArray(vB[i]->dup('f'));
      toDelete.emplace_back(pB[i]);
    } else
      pB[i] = vB[i];
    if (vC[i]->ews() != 1) {
      pC[i] = new NDArray(vC[i]->dup('f'));
      toDelete.emplace_back(pC[i]);
    } else
      pC[i] = vC[i];
    // cuBLAS writes column-major results; for a non-'f' C compute the
    // transposed product instead: (A*B)^T = B^T * A^T.
    // NOTE(review): M/N/K are reassigned inside this per-batch loop — this
    // assumes all batch members share the same shape/ordering; verify callers.
    if (pC[i]->ordering() != 'f') {
      auto temp = pA[i];
      pA[i] = new NDArray(pB[i]->permute({1, 0}));
      pB[i] = new NDArray(temp->permute({1, 0}));
      pC[i] = new NDArray(pC[i]->permute({1, 0}));
      toDelete.push_back(pA[i]);
      toDelete.push_back(pB[i]);
      toDelete.push_back(pC[i]);
      M = pA[i]->sizeAt(0);
      K = pA[i]->sizeAt(1);
      N = pB[i]->sizeAt(1);
    }
    NDArray::prepareSpecialUse({pC[i]}, {pA[i], pB[i]});
    NDArray::registerSpecialUse({pC[i]}, {pA[i], pB[i]});
  }
  NDArray::prepareSpecialUse({}, {alphas, betas});
  NDArray::registerSpecialUse({}, {alphas, betas});
  // Collect per-batch device pointers and mirror the pointer arrays on device.
  std::vector<void*> pAbuffs(bS), pBbuffs(bS), pCbuffs(bS);
  for (size_t i = 0; i < bS; ++i) {
    pAbuffs[i] = pA[i]->specialBuffer();
    pBbuffs[i] = pB[i]->specialBuffer();
    pCbuffs[i] = pC[i]->specialBuffer();
  }
  sd::LaunchContext* context = vA[0]->getContext();
  PointersManager manager(context, "helpers::bgemm cuda");
  const void** aBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pAbuffs.data(), bS * sizeof(void*)));
  const void** bBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pBbuffs.data(), bS * sizeof(void*)));
  void** cBuffers = reinterpret_cast<void**>(manager.replicatePointer(pCbuffs.data(), bS * sizeof(void*)));
  // 112 encodes "transpose" in the caller's convention.
  const cublasOperation_t transAblas = transA == 112 ? CUBLAS_OP_T : CUBLAS_OP_N;
  const cublasOperation_t transBblas = transB == 112 ? CUBLAS_OP_T : CUBLAS_OP_N;
  if (M < 0) throw std::runtime_error("M < 0");
  if (N < 0) throw std::runtime_error("N < 0");
  if (K < 0) throw std::runtime_error("K < 0");
  const auto aType = pA[0]->dataType();
  const auto bType = pB[0]->dataType();
  const auto cType = pC[0]->dataType();
  // The cuBLAS handle is shared per device: hold the device mutex while
  // binding the stream and issuing the batched call.
  std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
  auto handle = reinterpret_cast<cublasHandle_t*>(context->getCublasHandle());
  auto stream = context->getCudaStream();
  auto status = cublasSetStream_v2(*handle, *stream);
  if (status != CUBLAS_STATUS_SUCCESS)
    throw cuda_exception::build("MmulHelper::mmulMxM cuda set stream failed ! Please double check the passed in handle.", status);
  const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
  // choose appropriate cuda gemm api depending on data types
  if (ABC && aType == DataType::DOUBLE) {
    double alpha = alphas->e<double>(0);
    double beta = betas->e<double>(0);
    status = cublasDgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const double**)aBuffers, lda,
                                (const double**)bBuffers, ldb, &beta, (double**)cBuffers, ldc, bS);
  } else if (ABC && aType == DataType::FLOAT32) {
    float alpha = alphas->e<float>(0);
    float beta = betas->e<float>(0);
    status = cublasSgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const float**)aBuffers, lda,
                                (const float**)bBuffers, ldb, &beta, (float**)cBuffers, ldc, bS);
  } else if (ABC && aType == DataType::HALF) {
    __half alpha = alphas->e<float>(0);
    __half beta = betas->e<float>(0);
    status = cublasHgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const __half**)aBuffers, lda,
                                (const __half**)bBuffers, ldb, &beta, (__half**)cBuffers, ldc, bS);
  } else if (AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
    // Mixed-precision path: int8 inputs accumulated into float32 output.
    float alpha = alphas->e<float>(0);
    float beta = betas->e<float>(0);
    status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, CUDA_R_8I, lda, bBuffers,
                                 CUDA_R_8I, ldb, &beta, cBuffers, CUDA_R_32F, ldc, bS, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
  } else if (AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
    // Mixed-precision path: half inputs accumulated into float32 output.
    float alpha = alphas->e<float>(0);
    float beta = betas->e<float>(0);
    status =
        cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, CUDA_R_16F, lda, bBuffers,
                            CUDA_R_16F, ldb, &beta, cBuffers, CUDA_R_32F, ldc, bS, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
  } else
    throw std::runtime_error("batched gemm cuda: this mode is not implemented yet !");
  if (status != CUBLAS_STATUS_SUCCESS) {
    sd_printf("Status was: %d\n",status);
    throw cuda_exception::build("MmulHelper::mmulMxM cuda execution failed !", status);
  }
  auto cudaResult = cudaStreamSynchronize(*stream);
  if (cudaResult != 0) {
    throw cuda_exception::build("MmulHelper::mmulMxM cuda stream synchronize failed !", cudaResult);
  }
  // Copy results back for callers whose C buffers were replaced by dense copies.
  for (size_t i = 0; i < bS; ++i)
    if (vC[i]->ews() != 1) vC[i]->assign(pC[i]);
  // Delete temporaries in reverse creation order (permuted views before their
  // sources). Reverse iterators avoid the old int-underflow-dependent loop
  // 'for (int i = toDelete.size() - 1; i >= 0; --i)'.
  for (auto it = toDelete.rbegin(); it != toDelete.rend(); ++it) delete *it;
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
459764f6a15157d2d2e7aee52d02d4bd147ec211.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/layer_norm_kernel.h"
#include "gflags/gflags.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/layer_norm_impl.cu.h"
#include "paddle/phi/kernels/funcs/layer_norm_util.h"
DECLARE_bool(use_fast_math);
namespace phi {
#ifdef PADDLE_WITH_CUDA
// Streams a single sample into a running Welford accumulator.
//   *mean   - running mean
//   *square - running sum of squared deviations (M2)
//   *count  - number of samples folded in so far
template <typename U>
__device__ inline void WelfordOnline(U val, U *mean, U *square, U *count) {
  *count += 1;
  U dev_before = val - *mean;         // deviation w.r.t. the old mean
  *mean += dev_before / (*count);
  U dev_after = val - *mean;          // deviation w.r.t. the updated mean
  *square += dev_before * dev_after;  // numerically stable M2 update
}
// Merges one partial Welford accumulator (b_mean/b_square/b_cnt) into
// another (mean/square/count), e.g. when combining per-lane statistics.
// An empty partial is a no-op.
template <typename U>
__device__ inline void WelfordOnline(
    U b_mean, U b_square, U b_cnt, U *mean, U *square, U *count) {
  if (b_cnt == 0) return;  // nothing to merge
  U merged_cnt = *count + b_cnt;
  U weight = b_cnt / merged_cnt;
  U mean_gap = b_mean - *mean;
  *mean += mean_gap * weight;
  *square += b_square + mean_gap * mean_gap * (*count) * weight;
  *count = merged_cnt;
}
// Butterfly-style reduction across the 32 lanes of a warp: after the loop
// lane 0 holds the combined Welford statistics, which are then broadcast so
// every lane leaves with identical mean/M2/count.
// Uses the full 0xffffffff mask, so all 32 lanes of the warp must be active.
template <typename U>
__device__ inline void WelfordWarpAllReduce(U *mean, U *square, U *count) {
constexpr int kWarpSize = 32;
#pragma unroll
for (int mask = 1; mask < kWarpSize; mask *= 2) {
U b_mean = __shfl_down_sync(0xffffffff, *mean, mask);
U b_square = __shfl_down_sync(0xffffffff, *square, mask);
U b_cnt = __shfl_down_sync(0xffffffff, *count, mask);
// Fold the partner lane's partial statistics into this lane's.
WelfordOnline<U>(b_mean, b_square, b_cnt, mean, square, count);
}
// Broadcast lane 0's result to the whole warp.
*mean = __shfl_sync(0xffffffff, *mean, 0, kWarpSize);
*square = __shfl_sync(0xffffffff, *square, 0, kWarpSize);
*count = __shfl_sync(0xffffffff, *count, 0, kWarpSize);
}
// Generic (vectorized) thread assigner. The launcher only enables VecSize > 1
// when cols % 32 == 0, so every lane processes exactly cols_per_thread
// elements and last_tid_idx is left untouched.
template <int VecSize>
struct ThreadAssigner {
__device__ __forceinline__ int operator()(const int cols,
const int cols_per_thread,
int32_t *last_tid_idx) {
return cols_per_thread;
}
};
// Scalar specialization: cols need not divide evenly across the 32 lanes.
// Lanes below the boundary handle cols_per_thread elements each, the
// boundary lane takes the remainder, and lanes past it get no work.
// Always records the boundary lane index in *last_tid_idx.
template <>
struct ThreadAssigner<1> {
  __device__ inline int operator()(const int cols,
                                   const int cols_per_thread,
                                   int *last_tid_idx) {
    const int boundary_lane = cols / cols_per_thread;
    *last_tid_idx = boundary_lane;
    if (threadIdx.x < boundary_lane) return cols_per_thread;
    if (threadIdx.x == boundary_lane)
      return cols - cols_per_thread * boundary_lane;
    return 0;  // lanes beyond the boundary stay idle
  }
};
// Loads this thread's slice of one row from global memory into a per-thread
// staging buffer using VecSize-wide vectorized loads. Thread t reads vector
// chunks t, t + blockDim.x, ..., so consecutive lanes touch consecutive
// vectors (coalesced access). Values are widened to the accumulation type U.
template <typename T, typename U, int VecSize>
struct LayerNormDataReader {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
using VecT = phi::AlignedVector<T, VecSize>;
const VecT *__restrict__ v_src =
reinterpret_cast<const VecT *__restrict__>(row_src);
for (int i = 0; i < read_times; ++i) {
VecT temp_src = v_src[threadIdx.x + i * blockDim.x];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
// Widen to U while staging into registers/local memory.
buffer[i * VecSize + j] = static_cast<U>(temp_src[j]);
}
}
}
};
// Scalar (VecSize == 1) reader for rows whose width is not a multiple of the
// warp size. Lanes before the boundary read with stride last_tid_idx; the
// boundary lane reads the contiguous remainder at the end of the row; lanes
// past the boundary have cols_this_thread == 0 and read nothing.
template <typename T, typename U>
struct LayerNormDataReader<T, U, 1> {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
// read_time is just cols_per_thread while VecSize is 1.
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[threadIdx.x + last_tid_idx * i]);
}
} else {
// Boundary lane: pick up the tail starting right after the strided region.
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[i + read_times * last_tid_idx]);
}
}
}
};
// Writes this thread's normalized slice back to global memory with
// VecSize-wide stores, optionally applying elementwise scale (gamma) and/or
// bias (beta). Mirrors LayerNormDataReader's access pattern so every value
// lands where it was read from. (Name "Writter" kept as-is: it is referenced
// elsewhere in this file.)
template <typename T, typename U, bool IsSameType, int VecSize>
struct LayerNormDataWritter {
__device__ inline void operator()(
T *__restrict__ row_dst,
const U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
using VecT = phi::AlignedVector<T, VecSize>;
using ScaleT = funcs::LayerNormScaleBiasT<T, U, IsSameType>;
using VecScaleT = phi::AlignedVector<ScaleT, VecSize>;
VecT *v_dst = reinterpret_cast<VecT *>(row_dst);
// cols_this_thread is just cols_per_thread
// Case 1: plain normalization, no gamma and no beta.
if ((!valid_scale) && (!valid_bias)) {
for (int i = 0; i < write_times; ++i) {
VecT temp_dst;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>((buffer[i * VecSize + j] - row_mean) *
row_inv_var);
}
v_dst[threadIdx.x + blockDim.x * i] = temp_dst;
}
} else {
const VecScaleT *__restrict__ v_scale =
reinterpret_cast<const VecScaleT *__restrict__>(scale);
const VecScaleT *__restrict__ v_bias =
reinterpret_cast<const VecScaleT *__restrict__>(bias);
// Case 2: y = gamma * x_hat + beta.
if (valid_scale && valid_bias) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
} else {
// Case 3: gamma only (y = gamma * x_hat).
if (valid_scale) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var);
}
v_dst[idx] = temp_dst;
}
} else {
// Case 4: beta only (y = x_hat + beta).
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
}
}
}
}
};
// Scalar (VecSize == 1) writer: mirrors LayerNormDataReader<T, U, 1>'s
// access pattern — strided stores for lanes below the boundary, the
// contiguous tail for the boundary lane — applying optional gamma/beta.
template <typename T, typename U, bool IsSameType>
struct LayerNormDataWritter<T, U, IsSameType, 1> {
__device__ __forceinline__ void operator()(
T *__restrict__ row_dst,
U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
// write_times is just col_per_thread.
// Case 1: plain normalization, no gamma and no beta.
if ((!valid_scale) && (!valid_bias)) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[threadIdx.x + last_tid_idx * i] =
(buffer[i] - row_mean) * row_inv_var;
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[last_tid_idx * write_times + i] =
(buffer[i] - row_mean) * row_inv_var;
}
}
// Case 2: y = gamma * x_hat + beta.
} else if (valid_scale && valid_bias) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
} else {
// Case 3: gamma only (y = gamma * x_hat).
if (valid_scale) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
}
} else {
// Case 4: beta only (y = x_hat + beta).
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
}
}
}
};
// Layer-norm forward kernel using Welford's algorithm for mean/variance.
// Layout: blockDim.x == 32 (one warp per row), blockDim.y rows per block;
// row index = blockIdx.x * blockDim.y + threadIdx.y.
// Each lane stages its elements in 'buffer' (capacity kWarpSize == 32), so
// cols_per_thread must be <= 32; the launcher only takes this path for
// feature sizes <= 1024 (see LayerNormKernel below).
// Writes per-row mean and (biased) variance to 'mean'/'var', and the
// normalized output to 'dst_data'.
template <typename IndexT, typename T, typename U, bool IsSameType, int VecSize>
__global__ void LayerNormFwdWithWelford(
const T *__restrict__ src_data,
T *dst_data,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
U *mean,
U *var,
const U epsilon,
const IndexT rows,
const int32_t cols,
const int32_t cols_per_thread,
const bool valid_scale,
const bool valid_bias) {
constexpr int kWarpSize = 32;
int last_tid_idx = 0; // For condition once vecSize is 1.
IndexT row_offset = blockIdx.x * blockDim.y + threadIdx.y;
int cols_this_thread =
ThreadAssigner<VecSize>()(cols, cols_per_thread, &last_tid_idx);
int read_times = cols_per_thread / VecSize;
if (row_offset < rows) {
U buffer[kWarpSize];
U tid_cnt = static_cast<U>(0);
U tid_mean = static_cast<U>(0);
U tid_square = static_cast<U>(0);
const T *__restrict__ row_src = src_data + row_offset * cols;
T *row_dst = dst_data + row_offset * cols;
// Stage this lane's share of the row, then accumulate Welford stats.
LayerNormDataReader<T, U, VecSize>()(
row_src, buffer, last_tid_idx, read_times, cols_this_thread);
for (int i = 0; i < cols_this_thread; i++) {
WelfordOnline<U>(buffer[i], &tid_mean, &tid_square, &tid_cnt);
}
U warp_cnt = tid_cnt;
U warp_mean = tid_mean;
U warp_square = tid_square;
// Combine all 32 lanes; afterwards every lane holds the row statistics.
WelfordWarpAllReduce<U>(&warp_mean, &warp_square, &warp_cnt);
// Biased (population) variance M2/count, clamped at 0 for numerical safety.
U row_variance = max(warp_square / warp_cnt, 0.f);
U row_inv_var = funcs::rsqrt_(row_variance + epsilon);
// TODO(limingshu): make code below vectorization.
if (threadIdx.x == 0) {
// warp_mean is just row_mean here.
mean[row_offset] = warp_mean;
var[row_offset] = row_variance;
}
LayerNormDataWritter<T, U, IsSameType, VecSize>()(row_dst,
buffer,
scale,
bias,
warp_mean,
row_inv_var,
read_times,
cols_this_thread,
last_tid_idx,
valid_scale,
valid_bias);
}
}
// Host launcher for LayerNormFwdWithWelford. Picks the widest vector load
// size supported by the alignment of x/y (and scale/bias when present) that
// also divides cols_per_thread, then dispatches on 32- vs 64-bit row index
// and on whether scale/bias share T's type. Launch shape: one warp (32
// lanes) per row, RowPerBlock (4) rows per block.
template <typename Context, typename T, typename U>
void LaunchLayerNormKernel(const Context &dev_ctx,
const T *x_data,
T *y_data,
const void *void_scale_data,
const void *void_bias_data,
U *mean_data,
U *var_data,
float epsilon,
const int64_t rows,
const int cols,
const bool valid_scale,
const bool valid_bias,
const bool is_same_type) {
constexpr int WarpSize = 32;
constexpr int RowPerBlock = 4;
// NOTE(review): despite the name, 'block_size' is the grid x-dimension
// (number of blocks = ceil(rows / RowPerBlock)), not a block size.
int64_t block_size = (rows + (RowPerBlock - 1)) / RowPerBlock;
dim3 threads(WarpSize, RowPerBlock, 1);
int vec_size = 1;
int cols_per_thread = (cols + (WarpSize - 1)) / WarpSize;
// Vectorization is only attempted when each lane has >1 element and the row
// width is warp-aligned.
if (cols_per_thread > 1 && (cols % WarpSize == 0)) {
int data_vec_size = 0;
uint64_t addr = (reinterpret_cast<uint64_t>(x_data) |
reinterpret_cast<uint64_t>(y_data));
// NOTE(review): when neither scale nor bias is present, data_vec_size stays
// 0 and the loop below never runs, so vec_size remains 1 even for aligned
// x/y — confirm whether that is intended.
if (valid_bias || valid_scale) {
if (is_same_type) {
// OR all relevant pointers so the common alignment decides the width.
addr = valid_scale
? (addr | reinterpret_cast<uint64_t>(void_scale_data))
: addr;
addr = valid_bias ? (addr | reinterpret_cast<uint64_t>(void_bias_data))
: addr;
data_vec_size = phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr));
} else {
uint64_t bias_addr = reinterpret_cast<uint64_t>(void_bias_data);
uint64_t attr_addr = valid_scale
? reinterpret_cast<uint64_t>(void_scale_data)
: bias_addr;
attr_addr = valid_bias
? (valid_scale ? (attr_addr | bias_addr) : attr_addr)
: attr_addr;
data_vec_size = ::min(
phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr)),
phi::GetVectorizedSize<U>(reinterpret_cast<U *>(attr_addr)));
}
}
// Largest width that both the alignment and cols_per_thread allow.
for (int size = data_vec_size; size > 0; size /= 2) {
if (cols_per_thread % size == 0) {
vec_size = size;
break;
}
}
}
// One switch-case per vector width; instantiates and launches the kernel.
#define IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, vec_size_) \
case (vec_size_): { \
hipLaunchKernelGGL(( LayerNormFwdWithWelford<index_t, T, U, is_same_, vec_size_>) \
, dim3(block_size), dim3(threads), 0, dev_ctx.stream(), \
x_data, \
y_data, \
static_cast<const scale_t *>(void_scale_data), \
static_cast<const scale_t *>(void_bias_data), \
mean_data, \
var_data, \
static_cast<const U>(epsilon), \
rows, \
cols, \
cols_per_thread, \
valid_scale, \
valid_bias); \
} break
#define IMPL_LAYER_NORM_WELFORD(index_t, scale_t, is_same_) \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 4); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 2); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 1);
// 32-bit row indices when rows fits in int32, otherwise 64-bit.
if (rows < std::numeric_limits<int32_t>::max()) {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, U, false); }
}
} else {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, U, false); }
}
}
#undef IMPL_LAYER_NORM_WELFORD_CASE
#undef IMPL_LAYER_NORM_WELFORD
}
#endif // PADDLE_WITH_CUDA
// Stand-alone layer-norm forward entry point: flattens input_shape into
// [batch, feature] at begin_norm_axis and launches the generic
// funcs::LayerNormForward with a block size derived from the feature size.
// One block per row; mean/variance are written per row.
template <typename T, typename U>
void LayerNormDirectCUDAFunctor<T, U>::operator()(gpuStream_t stream,
const T *input,
std::vector<int> input_shape,
const U *bias,
const U *scale,
T *output,
U *mean,
U *variance,
int begin_norm_axis,
float eps) {
const auto x_dims = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
switch (phi::funcs::GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( phi::funcs::LayerNormForward<T, U, kBlockDim>)
, dim3(batch_size), dim3(kBlockDim), 0, stream,
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
// Explicit instantiations; the half variant exists only for native CUDA.
template class LayerNormDirectCUDAFunctor<float, float>;
template class LayerNormDirectCUDAFunctor<double, double>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class LayerNormDirectCUDAFunctor<half, float>;
#endif
// Layer normalization forward kernel (GPU). Flattens x into
// [batch_size, feature_size] at begin_norm_axis, validates that scale/bias
// share a dtype (either T or the parameter type U), then picks one of three
// implementations:
//   1) fast_ln_fwd_kernel      — currently hard-disabled (see below),
//   2) Welford warp kernel     — FLAGS_use_fast_math && feature_size <= 1024,
//   3) funcs::LayerNormForward — generic fallback (one block per row).
template <typename T, typename Context>
void LayerNormKernel(const Context &dev_ctx,
const DenseTensor &x,
const paddle::optional<DenseTensor> &scale_opt,
const paddle::optional<DenseTensor> &bias_opt,
float epsilon,
int begin_norm_axis,
DenseTensor *y,
DenseTensor *mean,
DenseTensor *var) {
// U is the parameter/statistics type (e.g. float for float16 inputs).
using U = phi::funcs::LayerNormParamType<T>;
auto *scale = scale_opt.get_ptr();
auto *bias = bias_opt.get_ptr();
const auto x_dims = x.dims();
auto *x_data = x.data<T>();
auto *y_data = dev_ctx.template Alloc<T>(y);
auto *mean_data = dev_ctx.template Alloc<U>(mean);
auto *var_data = dev_ctx.template Alloc<U>(var);
bool valid_scale = (scale != nullptr);
bool valid_bias = (bias != nullptr);
auto *void_scale_data = valid_scale ? scale->data() : nullptr;
auto *void_bias_data = valid_bias ? bias->data() : nullptr;
auto x_dtype = x.dtype();
// scale/bias must agree with each other and be either T's or U's dtype.
phi::DataType scale_bias_dtype;
if (valid_scale) {
scale_bias_dtype = scale->dtype();
if (valid_bias) {
PADDLE_ENFORCE_EQ(
scale->dtype(),
bias->dtype(),
phi::errors::InvalidArgument("This Scale and Bias of layer_norm op "
"should have the same data type."));
}
} else {
scale_bias_dtype = valid_bias ? bias->dtype() : x_dtype;
}
bool is_scale_bias_same_dtype_with_x = x_dtype == scale_bias_dtype;
if (!is_scale_bias_same_dtype_with_x) {
PADDLE_ENFORCE_EQ(scale_bias_dtype,
phi::CppTypeToDataType<U>::Type(),
phi::errors::InvalidArgument(
"Unsupported data type of Scale and Bias"));
}
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
auto stream = dev_ctx.stream();
// Fallback dispatch: one block per row, block dim chosen from feature_size.
#define PADDLE_LAUNCH_LAYERNORM_FWD(ScaleBiasT, IsScaleBiasSameDTypeWithX) \
do { \
switch (phi::funcs::GetDesiredBlockDim(feature_size)) { \
FIXED_BLOCK_DIM_CASE( \
phi::funcs:: \
hipLaunchKernelGGL(( LayerNormForward<T, U, kBlockDim, IsScaleBiasSameDTypeWithX>) \
, dim3(batch_size), dim3(kBlockDim), 0, stream, \
x_data, \
static_cast<const ScaleBiasT *>(void_scale_data), \
static_cast<const ScaleBiasT *>(void_bias_data), \
y_data, \
mean_data, \
var_data, \
epsilon, \
feature_size)); \
default: \
PADDLE_THROW(phi::errors::InvalidArgument( \
"Product from begin_norm_axis to end must be larger than 1")); \
break; \
} \
} while (0)
// Fast-path launcher for a fixed set of feature sizes (16-byte loads).
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, feature_size) \
case (feature_size): { \
constexpr int WARPS_N = feature_size < 1024 ? 1 : (feature_size / 1024); \
constexpr int WARPS_M = 4 / WARPS_N; \
const int THREADS_PER_WARP = 32; \
const int BYTES_PER_LDG = 16; \
const int VecSize = BYTES_PER_LDG / sizeof(T); \
const int THREADS_PER_CTA = WARPS_N * THREADS_PER_WARP * WARPS_M; \
const int ROWS_PER_CTA = WARPS_M; \
const int grid = static_cast<int>( \
::ceil(batch_size / static_cast<float>(ROWS_PER_CTA))); \
hipLaunchKernelGGL(( phi::funcs::fast_ln_fwd_kernel<T, \
U, \
ScaleT, \
VecSize, \
WARPS_M, \
WARPS_N, \
BYTES_PER_LDG>) \
, dim3(grid), dim3(THREADS_PER_CTA), 0, stream, \
batch_size, \
feature_size, \
epsilon, \
x_data, \
static_cast<const ScaleT *>(void_scale_data), \
static_cast<const ScaleT *>(void_bias_data), \
mean_data, \
var_data, \
y_data); \
} break
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD(ScaleT) \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 768); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1024); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1280); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1536); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1792); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 2048); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 4096)
#ifdef PADDLE_WITH_CUDA
bool can_call_fast_kernel = false;
if ((feature_size >= 768 && feature_size <= 2048 && feature_size % 256 == 0 ||
feature_size == 4096) &&
scale != nullptr && bias != nullptr) {
// can_call_fast_kernel = true;
// NOTE(review): fast kernel is hard-disabled here (the 'true' assignment
// above was commented out) — confirm whether this is intentional.
can_call_fast_kernel = false;
}
if (can_call_fast_kernel) {
if (is_scale_bias_same_dtype_with_x) {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(T);
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"256 is supported "
"now"));
break;
}
} else {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(U);
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"is supported "
"now"));
break;
}
}
} else {
// WarpShuffle intrinsics is involved in LaunchLayerNormKernel.
if (FLAGS_use_fast_math && feature_size <= 1024 &&
(!std::is_same<T, int8_t>::value)) {
LaunchLayerNormKernel<Context, T, U>(dev_ctx,
x_data,
y_data,
void_scale_data,
void_bias_data,
mean_data,
var_data,
epsilon,
batch_size,
feature_size,
valid_scale,
valid_bias,
is_scale_bias_same_dtype_with_x);
} else {
#endif
if (is_scale_bias_same_dtype_with_x) {
PADDLE_LAUNCH_LAYERNORM_FWD(T, true);
} else {
PADDLE_LAUNCH_LAYERNORM_FWD(U, false);
}
#ifdef PADDLE_WITH_CUDA
}
}
#endif
#undef PADDLE_LAUNCH_LAYERNORM_FWD
#undef PADDLE_LAUNCH_FAST_LAYERNORM_FWD
}
} // namespace phi
// Kernel registration. The mean/variance output dtypes are left UNDEFINED
// here — presumably resolved by the kernel registry from the parameter type;
// verify against the registry behavior.
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
phi::dtype::float16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#elif CUDNN_VERSION_MIN(8, 1, 0)
// bfloat16 is only registered when cuDNN >= 8.1 is available.
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#else
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#endif
| 459764f6a15157d2d2e7aee52d02d4bd147ec211.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/layer_norm_kernel.h"
#include "gflags/gflags.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/layer_norm_impl.cu.h"
#include "paddle/phi/kernels/funcs/layer_norm_util.h"
DECLARE_bool(use_fast_math);
namespace phi {
#ifdef PADDLE_WITH_CUDA
// Streams a single sample into a running Welford accumulator
// (*mean = running mean, *square = M2, *count = sample count).
template <typename U>
__device__ inline void WelfordOnline(U val, U *mean, U *square, U *count) {
*count += 1;
U delta1 = val - *mean;
*mean += delta1 / (*count);
U delta2 = val - *mean;
// delta1 uses the old mean, delta2 the updated one: stable M2 update.
*square += delta1 * delta2;
}
// Merges a partner partition's Welford statistics (b_mean, b_square, b_cnt)
// into the running (mean, square, count) triple — the parallel combine step
// of Chan et al.'s algorithm.  An empty partner (b_cnt == 0) is a no-op so
// idle lanes can participate in the warp reduction safely.
template <typename U>
__device__ inline void WelfordOnline(
    U b_mean, U b_square, U b_cnt, U *mean, U *square, U *count) {
  if (b_cnt == 0) {
    return;
  }
  U new_cnt = *count + b_cnt;
  U nb_n = b_cnt / new_cnt;  // partner's weight in the merged mean
  U delta = b_mean - *mean;
  *mean += delta * nb_n;
  // M2 merge: own M2 + partner M2 + between-partition correction term.
  *square += b_square + delta * delta * (*count) * nb_n;
  *count = new_cnt;
}
// All-reduces Welford statistics across one warp.  The shfl_down butterfly
// accumulates the warp's combined statistics into lane 0, which is then
// broadcast back to every lane with __shfl_sync, so all 32 lanes leave with
// identical (mean, square, count).  Assumes all 32 lanes of the warp are
// active (full 0xffffffff participation mask).
template <typename U>
__device__ inline void WelfordWarpAllReduce(U *mean, U *square, U *count) {
  constexpr int kWarpSize = 32;
#pragma unroll
  for (int mask = 1; mask < kWarpSize; mask *= 2) {
    U b_mean = __shfl_down_sync(0xffffffff, *mean, mask);
    U b_square = __shfl_down_sync(0xffffffff, *square, mask);
    U b_cnt = __shfl_down_sync(0xffffffff, *count, mask);
    WelfordOnline<U>(b_mean, b_square, b_cnt, mean, square, count);
  }
  // Broadcast lane 0's fully reduced result to the whole warp.
  *mean = __shfl_sync(0xffffffff, *mean, 0, kWarpSize);
  *square = __shfl_sync(0xffffffff, *square, 0, kWarpSize);
  *count = __shfl_sync(0xffffffff, *count, 0, kWarpSize);
}
// Decides how many columns this thread processes.  Vectorized case
// (VecSize > 1): the launcher only selects a vector width when
// cols % warp-size == 0, so every lane handles exactly cols_per_thread
// columns; cols and last_tid_idx are intentionally unused here.
template <int VecSize>
struct ThreadAssigner {
  __device__ __forceinline__ int operator()(const int cols,
                                            const int cols_per_thread,
                                            int32_t *last_tid_idx) {
    return cols_per_thread;
  }
};
// Scalar (VecSize == 1) specialization: cols may not divide evenly among
// the warp.  Lanes below last_tid each take cols_per_thread columns, the
// lane at last_tid takes the remainder, and lanes past it get 0 columns.
// *last_tid_idx is written out for the scalar reader/writer below.
template <>
struct ThreadAssigner<1> {
  __device__ inline int operator()(const int cols,
                                   const int cols_per_thread,
                                   int *last_tid_idx) {
    int cols_this_thread = cols_per_thread;
    int last_tid = (cols / cols_per_thread);
    *last_tid_idx = last_tid;
    if (threadIdx.x == last_tid) {
      // Tail lane: picks up whatever is left after the even split.
      cols_this_thread = cols - cols_per_thread * last_tid;
    } else if (threadIdx.x > last_tid) {
      cols_this_thread = 0;
    }
    return cols_this_thread;
  }
};
// Loads one input row into the per-thread register buffer, converting from
// storage type T to compute type U.  Vectorized version: each iteration
// loads VecSize contiguous elements through AlignedVector, with lanes
// striding by blockDim.x so warp accesses stay coalesced.  last_tid_idx and
// cols_this_thread are only meaningful in the VecSize == 1 specialization.
template <typename T, typename U, int VecSize>
struct LayerNormDataReader {
  __device__ inline void operator()(const T *__restrict__ row_src,
                                    U *buffer,
                                    const int last_tid_idx,
                                    const int read_times,
                                    const int cols_this_thread) {
    using VecT = phi::AlignedVector<T, VecSize>;
    const VecT *__restrict__ v_src =
        reinterpret_cast<const VecT *__restrict__>(row_src);
    for (int i = 0; i < read_times; ++i) {
      VecT temp_src = v_src[threadIdx.x + i * blockDim.x];
#pragma unroll
      for (int j = 0; j < VecSize; ++j) {
        buffer[i * VecSize + j] = static_cast<U>(temp_src[j]);
      }
    }
  }
};
// Scalar specialization: lanes before last_tid_idx read with a stride of
// last_tid_idx; the tail lane reads its remainder contiguously after the
// evenly divided region.  Lanes past the tail read nothing
// (cols_this_thread == 0).
template <typename T, typename U>
struct LayerNormDataReader<T, U, 1> {
  __device__ inline void operator()(const T *__restrict__ row_src,
                                    U *buffer,
                                    const int last_tid_idx,
                                    const int read_times,
                                    const int cols_this_thread) {
    // read_time is just cols_per_thread while VecSize is 1.
    if (threadIdx.x < last_tid_idx) {
      for (int i = 0; i < cols_this_thread; ++i) {
        buffer[i] = static_cast<U>(row_src[threadIdx.x + last_tid_idx * i]);
      }
    } else {
      for (int i = 0; i < cols_this_thread; ++i) {
        buffer[i] = static_cast<U>(row_src[i + read_times * last_tid_idx]);
      }
    }
  }
};
// Writes one normalized row back to global memory in storage type T,
// optionally applying scale (gamma) and bias (beta).  Vectorized version:
// each iteration stores VecSize contiguous elements through AlignedVector.
// The four (scale, bias) validity combinations are split into separate
// loops so the unrolled inner loop carries no per-element branching.
template <typename T, typename U, bool IsSameType, int VecSize>
struct LayerNormDataWritter {
  __device__ inline void operator()(
      T *__restrict__ row_dst,
      const U *__restrict__ buffer,
      const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
      const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
      const U row_mean,
      const U row_inv_var,
      const int write_times,
      const int cols_this_thread,
      const int last_tid_idx,
      const bool valid_scale,
      const bool valid_bias) {
    using VecT = phi::AlignedVector<T, VecSize>;
    using ScaleT = funcs::LayerNormScaleBiasT<T, U, IsSameType>;
    using VecScaleT = phi::AlignedVector<ScaleT, VecSize>;
    VecT *v_dst = reinterpret_cast<VecT *>(row_dst);
    // cols_this_thread is just cols_per_thread
    if ((!valid_scale) && (!valid_bias)) {
      // Plain normalization: y = (x - mean) * inv_std.
      for (int i = 0; i < write_times; ++i) {
        VecT temp_dst;
#pragma unroll
        for (int j = 0; j < VecSize; ++j) {
          temp_dst[j] = static_cast<T>((buffer[i * VecSize + j] - row_mean) *
                                       row_inv_var);
        }
        v_dst[threadIdx.x + blockDim.x * i] = temp_dst;
      }
    } else {
      const VecScaleT *__restrict__ v_scale =
          reinterpret_cast<const VecScaleT *__restrict__>(scale);
      const VecScaleT *__restrict__ v_bias =
          reinterpret_cast<const VecScaleT *__restrict__>(bias);
      if (valid_scale && valid_bias) {
        // y = gamma * (x - mean) * inv_std + beta
        for (int i = 0; i < write_times; ++i) {
          int idx = threadIdx.x + blockDim.x * i;
          VecT temp_dst;
          VecScaleT temp_v_scale = v_scale[idx];
          VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
          for (int j = 0; j < VecSize; ++j) {
            temp_dst[j] = static_cast<T>(
                static_cast<U>(temp_v_scale[j]) *
                    (buffer[i * VecSize + j] - row_mean) * row_inv_var +
                static_cast<U>(temp_v_bias[j]));
          }
          v_dst[idx] = temp_dst;
        }
      } else {
        if (valid_scale) {
          // Scale only: y = gamma * (x - mean) * inv_std
          for (int i = 0; i < write_times; ++i) {
            int idx = threadIdx.x + blockDim.x * i;
            VecT temp_dst;
            VecScaleT temp_v_scale = v_scale[idx];
#pragma unroll
            for (int j = 0; j < VecSize; ++j) {
              temp_dst[j] = static_cast<T>(
                  static_cast<U>(temp_v_scale[j]) *
                  (buffer[i * VecSize + j] - row_mean) * row_inv_var);
            }
            v_dst[idx] = temp_dst;
          }
        } else {
          // Bias only: y = (x - mean) * inv_std + beta
          for (int i = 0; i < write_times; ++i) {
            int idx = threadIdx.x + blockDim.x * i;
            VecT temp_dst;
            VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
            for (int j = 0; j < VecSize; ++j) {
              temp_dst[j] = static_cast<T>(
                  (buffer[i * VecSize + j] - row_mean) * row_inv_var +
                  static_cast<U>(temp_v_bias[j]));
            }
            v_dst[idx] = temp_dst;
          }
        }
      }
    }
  }
};
// Scalar (VecSize == 1) write-back specialization.  Mirrors the vectorized
// writer's four (scale, bias) combinations, but addresses follow the scalar
// reader's pattern: lanes before last_tid_idx write with a stride of
// last_tid_idx, the tail lane writes its remainder contiguously.
// Fix vs. original: the no-scale/no-bias branches assigned a U-typed
// expression straight into T storage; they now use the same explicit
// static_cast<T> as every other branch (identical behavior — the implicit
// conversion did the same thing — but consistent and conversion-warning
// clean).
template <typename T, typename U, bool IsSameType>
struct LayerNormDataWritter<T, U, IsSameType, 1> {
  __device__ __forceinline__ void operator()(
      T *__restrict__ row_dst,
      U *__restrict__ buffer,
      const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
      const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
      const U row_mean,
      const U row_inv_var,
      const int write_times,
      const int cols_this_thread,
      const int last_tid_idx,
      const bool valid_scale,
      const bool valid_bias) {
    // write_times is just col_per_thread.
    if ((!valid_scale) && (!valid_bias)) {
      // Plain normalization: y = (x - mean) * inv_std.
      if (threadIdx.x < last_tid_idx) {
        for (int i = 0; i < cols_this_thread; ++i) {
          row_dst[threadIdx.x + last_tid_idx * i] =
              static_cast<T>((buffer[i] - row_mean) * row_inv_var);
        }
      } else {
        for (int i = 0; i < cols_this_thread; ++i) {
          row_dst[last_tid_idx * write_times + i] =
              static_cast<T>((buffer[i] - row_mean) * row_inv_var);
        }
      }
    } else if (valid_scale && valid_bias) {
      // y = gamma * (x - mean) * inv_std + beta
      if (threadIdx.x < last_tid_idx) {
        for (int i = 0; i < cols_this_thread; ++i) {
          int idx = threadIdx.x + last_tid_idx * i;
          row_dst[idx] =
              static_cast<T>(static_cast<U>(scale[idx]) *
                                 (buffer[i] - row_mean) * row_inv_var +
                             static_cast<U>(bias[idx]));
        }
      } else {
        for (int i = 0; i < cols_this_thread; ++i) {
          int idx = last_tid_idx * write_times + i;
          row_dst[idx] =
              static_cast<T>(static_cast<U>(scale[idx]) *
                                 (buffer[i] - row_mean) * row_inv_var +
                             static_cast<U>(bias[idx]));
        }
      }
    } else {
      if (valid_scale) {
        // Scale only: y = gamma * (x - mean) * inv_std
        if (threadIdx.x < last_tid_idx) {
          for (int i = 0; i < cols_this_thread; ++i) {
            int idx = threadIdx.x + last_tid_idx * i;
            row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
                                          (buffer[i] - row_mean) * row_inv_var);
          }
        } else {
          for (int i = 0; i < cols_this_thread; ++i) {
            int idx = last_tid_idx * write_times + i;
            row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
                                          (buffer[i] - row_mean) * row_inv_var);
          }
        }
      } else {
        // Bias only: y = (x - mean) * inv_std + beta
        if (threadIdx.x < last_tid_idx) {
          for (int i = 0; i < cols_this_thread; ++i) {
            int idx = threadIdx.x + last_tid_idx * i;
            row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
                                          static_cast<U>(bias[idx]));
          }
        } else {
          for (int i = 0; i < cols_this_thread; ++i) {
            int idx = last_tid_idx * write_times + i;
            row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
                                          static_cast<U>(bias[idx]));
          }
        }
      }
    }
  }
};
// Fused LayerNorm forward kernel using Welford's algorithm for the row
// statistics.  Launch layout: blockDim = (32, rows-per-block); each warp
// (one threadIdx.y) owns one row, the 32 lanes split its columns.  Each
// lane buffers its columns in registers (buffer[kWarpSize] — the caller
// limits feature_size so cols_per_thread <= 32), warp-reduces the Welford
// statistics, then writes the normalized output plus per-row mean and
// (biased) variance.
template <typename IndexT, typename T, typename U, bool IsSameType, int VecSize>
__global__ void LayerNormFwdWithWelford(
    const T *__restrict__ src_data,
    T *dst_data,
    const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
    const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
    U *mean,
    U *var,
    const U epsilon,
    const IndexT rows,
    const int32_t cols,
    const int32_t cols_per_thread,
    const bool valid_scale,
    const bool valid_bias) {
  constexpr int kWarpSize = 32;
  int last_tid_idx = 0;  // For condition once vecSize is 1.
  IndexT row_offset = blockIdx.x * blockDim.y + threadIdx.y;
  int cols_this_thread =
      ThreadAssigner<VecSize>()(cols, cols_per_thread, &last_tid_idx);
  int read_times = cols_per_thread / VecSize;
  if (row_offset < rows) {
    U buffer[kWarpSize];
    U tid_cnt = static_cast<U>(0);
    U tid_mean = static_cast<U>(0);
    U tid_square = static_cast<U>(0);
    const T *__restrict__ row_src = src_data + row_offset * cols;
    T *row_dst = dst_data + row_offset * cols;
    LayerNormDataReader<T, U, VecSize>()(
        row_src, buffer, last_tid_idx, read_times, cols_this_thread);
    // Per-lane Welford accumulation over this lane's columns.
    for (int i = 0; i < cols_this_thread; i++) {
      WelfordOnline<U>(buffer[i], &tid_mean, &tid_square, &tid_cnt);
    }
    U warp_cnt = tid_cnt;
    U warp_mean = tid_mean;
    U warp_square = tid_square;
    // Combine all 32 lanes; every lane ends up with the row statistics.
    WelfordWarpAllReduce<U>(&warp_mean, &warp_square, &warp_cnt);
    // Clamp guards against tiny negative values from rounding.
    U row_variance = max(warp_square / warp_cnt, 0.f);
    U row_inv_var = funcs::rsqrt_(row_variance + epsilon);
    // TODO(limingshu): make code below vectorization.
    if (threadIdx.x == 0) {
      // warp_mean is just row_mean here.
      mean[row_offset] = warp_mean;
      var[row_offset] = row_variance;
    }
    LayerNormDataWritter<T, U, IsSameType, VecSize>()(row_dst,
                                                      buffer,
                                                      scale,
                                                      bias,
                                                      warp_mean,
                                                      row_inv_var,
                                                      read_times,
                                                      cols_this_thread,
                                                      last_tid_idx,
                                                      valid_scale,
                                                      valid_bias);
  }
}
// Host-side launcher for the Welford-based fused LayerNorm kernel.
// One 32-lane warp per row, RowPerBlock rows per block.  Picks the widest
// vector width (4/2/1) that divides cols_per_thread and matches the
// alignment of every pointer involved (x, y, and scale/bias when present),
// then dispatches on 32- vs 64-bit row indexing and on whether scale/bias
// share the storage type T or use the parameter type U.
template <typename Context, typename T, typename U>
void LaunchLayerNormKernel(const Context &dev_ctx,
                           const T *x_data,
                           T *y_data,
                           const void *void_scale_data,
                           const void *void_bias_data,
                           U *mean_data,
                           U *var_data,
                           float epsilon,
                           const int64_t rows,
                           const int cols,
                           const bool valid_scale,
                           const bool valid_bias,
                           const bool is_same_type) {
  constexpr int WarpSize = 32;
  constexpr int RowPerBlock = 4;
  // NOTE: despite the name, block_size is the grid dimension (ceil-div of
  // rows over the rows handled per block).
  int64_t block_size = (rows + (RowPerBlock - 1)) / RowPerBlock;
  dim3 threads(WarpSize, RowPerBlock, 1);
  int vec_size = 1;
  int cols_per_thread = (cols + (WarpSize - 1)) / WarpSize;
  // Vectorization only when the warp divides cols evenly; otherwise fall
  // back to the scalar (vec_size == 1) path.
  if (cols_per_thread > 1 && (cols % WarpSize == 0)) {
    int data_vec_size = 0;
    // OR-ing the addresses lets a single alignment query cover them all.
    uint64_t addr = (reinterpret_cast<uint64_t>(x_data) |
                     reinterpret_cast<uint64_t>(y_data));
    if (valid_bias || valid_scale) {
      if (is_same_type) {
        addr = valid_scale
                   ? (addr | reinterpret_cast<uint64_t>(void_scale_data))
                   : addr;
        addr = valid_bias ? (addr | reinterpret_cast<uint64_t>(void_bias_data))
                          : addr;
        data_vec_size = phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr));
      } else {
        // Scale/bias live in U; take the minimum of both alignments.
        uint64_t bias_addr = reinterpret_cast<uint64_t>(void_bias_data);
        uint64_t attr_addr = valid_scale
                                 ? reinterpret_cast<uint64_t>(void_scale_data)
                                 : bias_addr;
        attr_addr = valid_bias
                        ? (valid_scale ? (attr_addr | bias_addr) : attr_addr)
                        : attr_addr;
        data_vec_size = std::min(
            phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr)),
            phi::GetVectorizedSize<U>(reinterpret_cast<U *>(attr_addr)));
      }
    }
    // Largest width that still divides cols_per_thread evenly.
    for (int size = data_vec_size; size > 0; size /= 2) {
      if (cols_per_thread % size == 0) {
        vec_size = size;
        break;
      }
    }
  }
#define IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, vec_size_) \
  case (vec_size_): {                                                       \
    LayerNormFwdWithWelford<index_t, T, U, is_same_, vec_size_>             \
        <<<block_size, threads, 0, dev_ctx.stream()>>>(                     \
            x_data,                                                         \
            y_data,                                                         \
            static_cast<const scale_t *>(void_scale_data),                  \
            static_cast<const scale_t *>(void_bias_data),                   \
            mean_data,                                                      \
            var_data,                                                       \
            static_cast<const U>(epsilon),                                  \
            rows,                                                           \
            cols,                                                           \
            cols_per_thread,                                                \
            valid_scale,                                                    \
            valid_bias);                                                    \
  } break
#define IMPL_LAYER_NORM_WELFORD(index_t, scale_t, is_same_)  \
  IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 4); \
  IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 2); \
  IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 1);
  // 32-bit indexing when rows fits, to save registers; 64-bit otherwise.
  if (rows < std::numeric_limits<int32_t>::max()) {
    if (is_same_type) {
      switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, T, true); }
    } else {
      switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, U, false); }
    }
  } else {
    if (is_same_type) {
      switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, T, true); }
    } else {
      switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, U, false); }
    }
  }
#undef IMPL_LAYER_NORM_WELFORD_CASE
#undef IMPL_LAYER_NORM_WELFORD
}
#endif // PADDLE_WITH_CUDA
// Stream-level LayerNorm entry point (used without the full kernel-registry
// machinery).  Flattens input_shape to [batch, feature] at begin_norm_axis
// and launches the generic block-reduction forward kernel, one block per
// row with a feature-size-dependent block dimension.
template <typename T, typename U>
void LayerNormDirectCUDAFunctor<T, U>::operator()(gpuStream_t stream,
                                                  const T *input,
                                                  std::vector<int> input_shape,
                                                  const U *bias,
                                                  const U *scale,
                                                  T *output,
                                                  U *mean,
                                                  U *variance,
                                                  int begin_norm_axis,
                                                  float eps) {
  const auto x_dims = phi::make_ddim(input_shape);
  auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
  int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
  int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
  switch (phi::funcs::GetDesiredBlockDim(feature_size)) {
    FIXED_BLOCK_DIM_CASE(
        phi::funcs::LayerNormForward<T, U, kBlockDim>
        <<<batch_size, kBlockDim, 0, stream>>>(
            input, scale, bias, output, mean, variance, eps, feature_size));
    default:
      PADDLE_THROW(phi::errors::InvalidArgument(
          "Product from begin_norm_axis to end in layer_norm must be larger "
          "than 1"));
      break;
  }
}
// Explicit instantiations for the supported (storage, parameter) type
// pairs; half/float only exists on the CUDA (non-HIP) build.
template class LayerNormDirectCUDAFunctor<float, float>;
template class LayerNormDirectCUDAFunctor<double, double>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class LayerNormDirectCUDAFunctor<half, float>;
#endif
// LayerNorm forward kernel: flattens x to [batch_size, feature_size] at
// begin_norm_axis, normalizes each row, and applies optional scale/bias.
// Outputs mean and var are allocated in the parameter type U
// (LayerNormParamType<T>, i.e. float for float16 inputs).  Three dispatch
// paths exist: a "fast" fused kernel for selected feature sizes (currently
// force-disabled below), the Welford warp kernel when FLAGS_use_fast_math
// is set and feature_size <= 1024, and the generic block-reduction
// LayerNormForward otherwise.
template <typename T, typename Context>
void LayerNormKernel(const Context &dev_ctx,
                     const DenseTensor &x,
                     const paddle::optional<DenseTensor> &scale_opt,
                     const paddle::optional<DenseTensor> &bias_opt,
                     float epsilon,
                     int begin_norm_axis,
                     DenseTensor *y,
                     DenseTensor *mean,
                     DenseTensor *var) {
  using U = phi::funcs::LayerNormParamType<T>;
  auto *scale = scale_opt.get_ptr();
  auto *bias = bias_opt.get_ptr();
  const auto x_dims = x.dims();
  auto *x_data = x.data<T>();
  auto *y_data = dev_ctx.template Alloc<T>(y);
  auto *mean_data = dev_ctx.template Alloc<U>(mean);
  auto *var_data = dev_ctx.template Alloc<U>(var);
  bool valid_scale = (scale != nullptr);
  bool valid_bias = (bias != nullptr);
  auto *void_scale_data = valid_scale ? scale->data() : nullptr;
  auto *void_bias_data = valid_bias ? bias->data() : nullptr;
  auto x_dtype = x.dtype();
  // Scale/bias must agree with each other, and must be either the input
  // dtype T or the parameter dtype U.
  phi::DataType scale_bias_dtype;
  if (valid_scale) {
    scale_bias_dtype = scale->dtype();
    if (valid_bias) {
      PADDLE_ENFORCE_EQ(
          scale->dtype(),
          bias->dtype(),
          phi::errors::InvalidArgument("This Scale and Bias of layer_norm op "
                                       "should have the same data type."));
    }
  } else {
    scale_bias_dtype = valid_bias ? bias->dtype() : x_dtype;
  }
  bool is_scale_bias_same_dtype_with_x = x_dtype == scale_bias_dtype;
  if (!is_scale_bias_same_dtype_with_x) {
    PADDLE_ENFORCE_EQ(scale_bias_dtype,
                      phi::CppTypeToDataType<U>::Type(),
                      phi::errors::InvalidArgument(
                          "Unsupported data type of Scale and Bias"));
  }
  auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
  int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
  int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
  auto stream = dev_ctx.stream();
// Generic path: one block per row, block-level reduction.
#define PADDLE_LAUNCH_LAYERNORM_FWD(ScaleBiasT, IsScaleBiasSameDTypeWithX) \
  do {                                                                     \
    switch (phi::funcs::GetDesiredBlockDim(feature_size)) {                \
      FIXED_BLOCK_DIM_CASE(                                                \
          phi::funcs::                                                     \
              LayerNormForward<T, U, kBlockDim, IsScaleBiasSameDTypeWithX> \
              <<<batch_size, kBlockDim, 0, stream>>>(                      \
                  x_data,                                                  \
                  static_cast<const ScaleBiasT *>(void_scale_data),        \
                  static_cast<const ScaleBiasT *>(void_bias_data),         \
                  y_data,                                                  \
                  mean_data,                                               \
                  var_data,                                                \
                  epsilon,                                                 \
                  feature_size));                                          \
      default:                                                             \
        PADDLE_THROW(phi::errors::InvalidArgument(                         \
            "Product from begin_norm_axis to end must be larger than 1")); \
        break;                                                             \
    }                                                                      \
  } while (0)
// Fast path: templated, vector-load fused kernel for fixed feature sizes.
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, feature_size)        \
  case (feature_size): {                                                   \
    constexpr int WARPS_N = feature_size < 1024 ? 1 : (feature_size / 1024); \
    constexpr int WARPS_M = 4 / WARPS_N;                                   \
    const int THREADS_PER_WARP = 32;                                       \
    const int BYTES_PER_LDG = 16;                                          \
    const int VecSize = BYTES_PER_LDG / sizeof(T);                         \
    const int THREADS_PER_CTA = WARPS_N * THREADS_PER_WARP * WARPS_M;      \
    const int ROWS_PER_CTA = WARPS_M;                                      \
    const int grid = static_cast<int>(                                     \
        std::ceil(batch_size / static_cast<float>(ROWS_PER_CTA)));         \
    phi::funcs::fast_ln_fwd_kernel<T,                                      \
                                   U,                                      \
                                   ScaleT,                                 \
                                   VecSize,                                \
                                   WARPS_M,                                \
                                   WARPS_N,                                \
                                   BYTES_PER_LDG>                          \
        <<<grid, THREADS_PER_CTA, 0, stream>>>(                            \
            batch_size,                                                    \
            feature_size,                                                  \
            epsilon,                                                       \
            x_data,                                                        \
            static_cast<const ScaleT *>(void_scale_data),                  \
            static_cast<const ScaleT *>(void_bias_data),                   \
            mean_data,                                                     \
            var_data,                                                      \
            y_data);                                                       \
  } break
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD(ScaleT)       \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 768);  \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1024); \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1280); \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1536); \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1792); \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 2048); \
  PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 4096)
#ifdef PADDLE_WITH_CUDA
  bool can_call_fast_kernel = false;
  if ((feature_size >= 768 && feature_size <= 2048 && feature_size % 256 == 0 ||
       feature_size == 4096) &&
      scale != nullptr && bias != nullptr) {
    // NOTE(review): the fast path is force-disabled here — the enabling
    // line is commented out.  Presumably intentional; confirm before
    // re-enabling.
    // can_call_fast_kernel = true;
    can_call_fast_kernel = false;
  }
  if (can_call_fast_kernel) {
    if (is_scale_bias_same_dtype_with_x) {
      switch (feature_size) {
        PADDLE_LAUNCH_FAST_LAYERNORM_FWD(T);
        default:
          PADDLE_THROW(phi::errors::InvalidArgument(
              "Only when feature_size is from 256 to 4096 and is diviaible by "
              "256 is supported "
              "now"));
          break;
      }
    } else {
      switch (feature_size) {
        PADDLE_LAUNCH_FAST_LAYERNORM_FWD(U);
        default:
          PADDLE_THROW(phi::errors::InvalidArgument(
              "Only when feature_size is from 256 to 4096 and is diviaible by "
              "is supported "
              "now"));
          break;
      }
    }
  } else {
    // WarpShuffle intrinsics is involved in LaunchLayerNormKernel.
    if (FLAGS_use_fast_math && feature_size <= 1024 &&
        (!std::is_same<T, int8_t>::value)) {
      LaunchLayerNormKernel<Context, T, U>(dev_ctx,
                                           x_data,
                                           y_data,
                                           void_scale_data,
                                           void_bias_data,
                                           mean_data,
                                           var_data,
                                           epsilon,
                                           batch_size,
                                           feature_size,
                                           valid_scale,
                                           valid_bias,
                                           is_scale_bias_same_dtype_with_x);
    } else {
#endif
      if (is_scale_bias_same_dtype_with_x) {
        PADDLE_LAUNCH_LAYERNORM_FWD(T, true);
      } else {
        PADDLE_LAUNCH_LAYERNORM_FWD(U, false);
      }
#ifdef PADDLE_WITH_CUDA
    }
  }
#endif
#undef PADDLE_LAUNCH_LAYERNORM_FWD
#undef PADDLE_LAUNCH_FAST_LAYERNORM_FWD
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
// ROCm/MIOpen build does not support double, so register float/fp16 only.
PD_REGISTER_KERNEL(layer_norm,
                   GPU,
                   ALL_LAYOUT,
                   phi::LayerNormKernel,
                   float,
                   phi::dtype::float16) {
  // mean/var dtype follows the input (float for fp16), so it cannot be
  // fixed at registration time.
  kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
  kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#elif CUDNN_VERSION_MIN(8, 1, 0)
// cuDNN >= 8.1: bfloat16 is additionally supported.
PD_REGISTER_KERNEL(layer_norm,
                   GPU,
                   ALL_LAYOUT,
                   phi::LayerNormKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {
  kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
  kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#else
// Older CUDA toolchains: no bfloat16.
PD_REGISTER_KERNEL(layer_norm,
                   GPU,
                   ALL_LAYOUT,
                   phi::LayerNormKernel,
                   float,
                   double,
                   phi::dtype::float16) {
  kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
  kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#endif
|
bda2167b131629f6afc6b69a3ecd41f6c06748f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from tfqmr into one.
/* -------------------------------------------------------------------------- */
// One thread per row of the num_rows x num_cols column-major block; each
// thread updates its row across all columns:
//   u_mp1 = u_m - alpha*v;  w -= alpha*Au;  d = pu_m + sigma*d;
//   Ad = Au + sigma*Ad
__global__ void
magma_ztfqmr_1_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex *v,
    magmaDoubleComplex *Au,
    magmaDoubleComplex *u_m,
    magmaDoubleComplex *pu_m,
    magmaDoubleComplex *u_mp1,
    magmaDoubleComplex *w,
    magmaDoubleComplex *d,
    magmaDoubleComplex *Ad )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        u_mp1[idx] = u_m[idx] - alpha * v[idx];
        w[idx] = w[idx] - alpha * Au[idx];
        d[idx] = pu_m[idx] + sigma * d[idx];
        Ad[idx] = Au[idx] + sigma * Ad[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u_mp1 = u_m - alpha*v;
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
sigma magmaDoubleComplex
scalar
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in]
Au magmaDoubleComplex_ptr
vector
@param[in,out]
u_m magmaDoubleComplex_ptr
vector
@param[in,out]
pu_m magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
Ad magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magmaDoubleComplex_ptr u_m,
    magmaDoubleComplex_ptr pu_m,
    magmaDoubleComplex_ptr u_mp1,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid covers all num_rows rows.
    // Launched asynchronously on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_ztfqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma,
                      v, Au, u_m, pu_m, u_mp1, w, d, Ad );
    return MAGMA_SUCCESS;
}
// One thread per row; updates x += eta*d and r -= eta*Ad element-wise over
// the num_rows x num_cols column-major block.
__global__ void
magma_ztfqmr_2_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex eta,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_ptr r )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        x[idx] = x[idx] + eta * d[idx];
        r[idx] = r[idx] - eta * Ad[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + eta * d
r = r - eta * Ad
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
d magmaDoubleComplex_ptr
vector
@param[in]
Ad magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex eta,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid; async on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_ztfqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, d, Ad, x, r );
    return MAGMA_SUCCESS;
}
// One thread per row; computes u_mp1 = w + beta*u_m element-wise over the
// num_rows x num_cols column-major block.
__global__ void
magma_ztfqmr_3_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex *w,
    magmaDoubleComplex *u_m,
    magmaDoubleComplex *u_mp1 )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        u_mp1[idx] = w[idx] + beta * u_m[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u_mp1 = w + beta*u_mp1
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
w magmaDoubleComplex_ptr
vector
@param[in]
u_m magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_3(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr u_m,
    magmaDoubleComplex_ptr u_mp1,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid; async on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_ztfqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, w, u_m, u_mp1 );
    return MAGMA_SUCCESS;
}
// One thread per row; computes v = Au_new + beta*Au + beta^2*v, then
// Au = Au_new.  Au_new is read into a register first so the update of Au
// does not disturb the v computation.
__global__ void
magma_ztfqmr_4_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex *Au_new,
    magmaDoubleComplex *v,
    magmaDoubleComplex *Au )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        magmaDoubleComplex au_next = Au_new[idx];
        v[idx] = au_next + beta * Au[idx]
                + beta * beta * v[idx];
        Au[idx] = au_next;
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
v = Au_new + beta*(Au+beta*v);
Au = Au_new
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
Au_new magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
Au magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_4(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr Au_new,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid; async on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_ztfqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, Au_new, v, Au );
    return MAGMA_SUCCESS;
}
// One thread per row; updates w -= alpha*Au, d = u_mp1 + sigma*d and
// Ad = Au + sigma*Ad element-wise.
// NOTE(review): parameter v is accepted but never referenced by this
// kernel's body; kept for interface compatibility.
__global__ void
magma_ztfqmr_5_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex *v,
    magmaDoubleComplex *Au,
    magmaDoubleComplex *u_mp1,
    magmaDoubleComplex *w,
    magmaDoubleComplex *d,
    magmaDoubleComplex *Ad )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        w[idx] = w[idx] - alpha * Au[idx];
        d[idx] = u_mp1[idx] + sigma * d[idx];
        Ad[idx] = Au[idx] + sigma * Ad[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
w = w - alpha*Au;
d = u_mp1 + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
sigma magmaDoubleComplex
scalar
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in]
Au magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
Ad magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_5(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magmaDoubleComplex_ptr u_mp1,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid; async on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_ztfqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma,
                      v, Au, u_mp1, w, d, Ad );
    return MAGMA_SUCCESS;
}
| bda2167b131629f6afc6b69a3ecd41f6c06748f7.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from tfqmr into one.
/* -------------------------------------------------------------------------- */
// One thread per row of the num_rows x num_cols column-major block; each
// thread updates its row across all columns:
//   u_mp1 = u_m - alpha*v;  w -= alpha*Au;  d = pu_m + sigma*d;
//   Ad = Au + sigma*Ad
__global__ void
magma_ztfqmr_1_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex *v,
    magmaDoubleComplex *Au,
    magmaDoubleComplex *u_m,
    magmaDoubleComplex *pu_m,
    magmaDoubleComplex *u_mp1,
    magmaDoubleComplex *w,
    magmaDoubleComplex *d,
    magmaDoubleComplex *Ad )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        u_mp1[idx] = u_m[idx] - alpha * v[idx];
        w[idx] = w[idx] - alpha * Au[idx];
        d[idx] = pu_m[idx] + sigma * d[idx];
        Ad[idx] = Au[idx] + sigma * Ad[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u_mp1 = u_m - alpha*v;
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
sigma magmaDoubleComplex
scalar
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in]
Au magmaDoubleComplex_ptr
vector
@param[in,out]
u_m magmaDoubleComplex_ptr
vector
@param[in,out]
pu_m magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
Ad magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magmaDoubleComplex_ptr u_m,
    magmaDoubleComplex_ptr pu_m,
    magmaDoubleComplex_ptr u_mp1,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid covers all num_rows rows.
    // Launched asynchronously on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_ztfqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma,
                     v, Au, u_m, pu_m, u_mp1, w, d, Ad );
    return MAGMA_SUCCESS;
}
// One thread per row; updates x += eta*d and r -= eta*Ad element-wise over
// the num_rows x num_cols column-major block.
__global__ void
magma_ztfqmr_2_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex eta,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_ptr r )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        x[idx] = x[idx] + eta * d[idx];
        r[idx] = r[idx] - eta * Ad[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + eta * d
r = r - eta * Ad
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
d magmaDoubleComplex_ptr
vector
@param[in]
Ad magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ztfqmr_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex eta,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid; async on the queue's stream.
    dim3 Bs( BLOCK_SIZE );
    dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_ztfqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, d, Ad, x, r );
    return MAGMA_SUCCESS;
}
// One thread per row; computes u_mp1 = w + beta*u_m element-wise over the
// num_rows x num_cols column-major block.
__global__ void
magma_ztfqmr_3_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex *w,
    magmaDoubleComplex *u_m,
    magmaDoubleComplex *u_mp1 )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows ) {
        return;  // grid tail guard
    }
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;  // column-major offset
        u_mp1[idx] = w[idx] + beta * u_m[idx];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u_mp1 = w + beta*u_mp1
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
w magmaDoubleComplex_ptr
vector
@param[in]
u_m magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host wrapper: launches magma_ztfqmr_3_kernel on the queue's CUDA stream
// with one thread per row and BLOCK_SIZE threads per block.
extern "C"
magma_int_t
magma_ztfqmr_3(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr u_m,
    magmaDoubleComplex_ptr u_mp1,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_ztfqmr_3_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>
        ( num_rows, num_cols, beta, w, u_m, u_mp1 );
    return MAGMA_SUCCESS;
}
// Computes v = Au_new + beta*Au + beta^2*v and then Au = Au_new, element-wise
// over an m x n multi-vector stored column-major. Au_new is cached in a
// register so the update order stays correct even though Au is overwritten.
__global__ void
magma_ztfqmr_4_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex *Au_new,
    magmaDoubleComplex *v,
    magmaDoubleComplex *Au )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; col++ ) {
        int idx = row + col * num_rows;
        magmaDoubleComplex au_new = Au_new[ idx ];
        v[ idx ] = au_new + beta * Au[ idx ]
                 + beta * beta * v[ idx ];
        Au[ idx ] = au_new;
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
v = Au_new + beta*(Au+beta*v);
Au = Au_new
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
Au_new magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
Au magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host wrapper: launches magma_ztfqmr_4_kernel on the queue's CUDA stream
// with one thread per row and BLOCK_SIZE threads per block.
extern "C"
magma_int_t
magma_ztfqmr_4(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr Au_new,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_ztfqmr_4_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>
        ( num_rows, num_cols, beta, Au_new, v, Au );
    return MAGMA_SUCCESS;
}
// Merged TFQMR update over an m x n multi-vector stored column-major:
//   w  = w - alpha*Au
//   d  = u_mp1 + sigma*d
//   Ad = Au + sigma*Ad
// One thread per row; each thread iterates over all columns.
// NOTE(review): parameter v is never referenced in this kernel body —
// presumably kept for signature symmetry with the other tfqmr kernels.
__global__ void
magma_ztfqmr_5_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex sigma,
magmaDoubleComplex *v,
magmaDoubleComplex *Au,
magmaDoubleComplex *u_mp1,
magmaDoubleComplex *w,
magmaDoubleComplex *d,
magmaDoubleComplex *Ad )
{
// Global row index; column-major layout means element (i, j) lives at i+j*num_rows.
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ];
d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ];
Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
w = w - alpha*Au;
d = u_mp1 + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
sigma magmaDoubleComplex
scalar
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in]
Au magmaDoubleComplex_ptr
vector
@param[in,out]
u_mp1 magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
Ad magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host wrapper: launches magma_ztfqmr_5_kernel on the queue's CUDA stream
// with one thread per row and BLOCK_SIZE threads per block.
extern "C"
magma_int_t
magma_ztfqmr_5(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex sigma,
    magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr Au,
    magmaDoubleComplex_ptr u_mp1,
    magmaDoubleComplex_ptr w,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr Ad,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_ztfqmr_5_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>
        ( num_rows, num_cols, alpha, sigma, v, Au, u_mp1, w, d, Ad );
    return MAGMA_SUCCESS;
}
|
a5186ea685d1439f571627954083217e35821de2.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "ExtractTextRegions.h"
// Stores one RGB pixel into a batched planar (channel-major) image buffer:
// layout is [batch][channel][y][x] with channel planes of width*height elements.
template<typename T>
__device__ __forceinline__ void write_pixel(T* rgb, std::int32_t batch, std::int32_t x, std::int32_t y, T r, T g, T b, std::int32_t width, std::int32_t height)
{
    std::int32_t const plane = width * height;
    T* base = rgb + plane * 3 * batch + width * y + x;
    base[plane * 0] = r;
    base[plane * 1] = g;
    base[plane * 2] = b;
}
// Loads one RGB pixel from a batched planar (channel-major) image buffer;
// layout mirrors write_pixel: [batch][channel][y][x].
__device__ __forceinline__ uchar3 read_pixel(std::uint8_t const* const rgb, std::int32_t batch, std::int32_t x, std::int32_t y, std::int32_t width, std::int32_t height)
{
    std::int32_t const plane = width * height;
    std::uint8_t const* base = rgb + plane * 3 * batch + width * y + x;
    return uchar3{ base[plane * 0], base[plane * 1], base[plane * 2] };
}
// Rescales each text bounding box into a fixed-size planar-RGB output slot
// (region_width x region_height) with nearest-neighbour sampling, preserving
// aspect ratio and zero-padding the leftover area (right, or top+bottom).
// Launch layout: blockIdx.z selects the bbox; x/y threads cover the output.
__global__ void ExtractTextRegions_kernel(
std::uint8_t const* const frames,
BBox const* const bboxes,
subtitle_index const* const bbox_frame_map,
std::uint8_t* regions,
std::int32_t frame_width,
std::int32_t frame_height,
std::int32_t region_width, // multiple of 32
std::int32_t region_height // 32
)
{
std::int32_t ix(blockIdx.x * blockDim.x + threadIdx.x), iy(blockIdx.y * blockDim.y + threadIdx.y), iz(blockIdx.z);
auto const& box(bboxes[iz]);
// Width the box would have when scaled to region_height at constant aspect ratio.
auto ratio(__int2float_rz(box.width) / __int2float_rz(box.height));
auto new_width(__float2int_rz(__int2float_rz(region_height) * ratio));
// Index of the source frame this bbox was detected in.
auto src_frame_idx(bbox_frame_map[iz].first);
if (new_width <= region_width)
{
// right pad zero
if (ix >= new_width)
{
// set zero
write_pixel(regions, iz, ix, iy, (std::uint8_t)0, (std::uint8_t)0, (std::uint8_t)0, region_width, region_height);
}
else
{
// nearest sample: normalized coords in the scaled box, mapped back into the bbox.
float2 uv{ __int2float_rz(ix) / __int2float_rz(new_width - 1), __int2float_rz(iy) / __int2float_rz(region_height - 1) };
int2 pos{ __float2int_rz(uv.x * __int2float_rz(box.width - 1) + 0.5f), __float2int_rz(uv.y * __int2float_rz(box.height - 1) + 0.5f) };
auto rgb(read_pixel(frames, src_frame_idx, pos.x + box.x, pos.y + box.y, frame_width, frame_height));
write_pixel(regions, iz, ix, iy, rgb.x, rgb.y, rgb.z, region_width, region_height);
}
}
else
{
// Box is wider than the slot even at full width: fit to region_width and
// pad top and bottom zero.
auto new_height(__float2int_rz(__int2float_rz(region_width) / ratio));
new_height += new_height & 1; // make multiple of 2
auto pad_height((region_height - new_height) / 2);
if (iy < pad_height || iy >= pad_height + new_height)
{
// set zero
write_pixel(regions, iz, ix, iy, (std::uint8_t)0, (std::uint8_t)0, (std::uint8_t)0, region_width, region_height);
}
else
{
// nearest sample
float2 uv{ __int2float_rz(ix) / __int2float_rz(region_width - 1), __int2float_rz(iy - pad_height) / __int2float_rz(new_height - 1) };
int2 pos{ __float2int_rz(uv.x * __int2float_rz(box.width - 1) + 0.5f), __float2int_rz(uv.y * __int2float_rz(box.height - 1) + 0.5f) };
auto rgb(read_pixel(frames, src_frame_idx, pos.x + box.x, pos.y + box.y, frame_width, frame_height));
write_pixel(regions, iz, ix, iy, rgb.x, rgb.y, rgb.z, region_width, region_height);
}
}
}
// Host driver for ExtractTextRegions_kernel (HIP build).
// Uploads the bboxes and their bbox->frame map to scratch device buffers,
// picks one common output width (max aspect-preserved width over all boxes,
// rounded up to a multiple of text_region_height and clamped to
// text_region_max_width), sizes the output buffer, and launches the kernel
// on `stream`. The chosen width is reported via out_text_region_width.
// NOTE(review): launch and uploads are asynchronous on `stream`; caller is
// presumably responsible for synchronizing before reading the output.
void ExtractTextRegions(
cudawrapper::CUDADeviceMemoryUnique<std::uint8_t> const& in_frames,
cudawrapper::CUDADeviceMemoryUnique<std::uint8_t>& out_extracted_regions,
cudawrapper::CUDADeviceMemoryUnique<BBox>& tmp_contiguous_bboxes_gpu,
cudawrapper::CUDADeviceMemoryUnique<subtitle_index>& tmp_contiguous_individual_map_gpu,
std::vector<BBox> const& bboxes_contiguous,
std::vector<subtitle_index> contiguous_individual_map,
std::int32_t width,
std::int32_t height,
std::int32_t text_region_height, // this is 32
std::int32_t text_region_max_width,
std::int32_t *out_text_region_width,
hipStream_t stream
)
{
assert(text_region_height == 32);
std::size_t num_text_regions(bboxes_contiguous.size());
// allocate space for tmp (grow-only scratch buffers, reused across calls)
if (tmp_contiguous_bboxes_gpu.empty() || tmp_contiguous_bboxes_gpu.size() < num_text_regions)
tmp_contiguous_bboxes_gpu.reallocate(num_text_regions);
if (tmp_contiguous_individual_map_gpu.empty() || tmp_contiguous_individual_map_gpu.size() < num_text_regions)
tmp_contiguous_individual_map_gpu.reallocate(num_text_regions);
// upload bboxes
tmp_contiguous_bboxes_gpu.upload(bboxes_contiguous, stream);
tmp_contiguous_individual_map_gpu.upload(contiguous_individual_map, stream);
// find max region width over all boxes when scaled to text_region_height
std::int32_t text_region_width(-1);
for (auto const& box : bboxes_contiguous)
{
auto ratio(static_cast<float>(box.width) / static_cast<float>(box.height));
auto new_width(static_cast<std::int32_t>(static_cast<float>(text_region_height) * ratio));
text_region_width = ::max(text_region_width, new_width);
}
// Round up to a multiple of text_region_height, then clamp.
// NOTE(review): if text_region_max_width is not a multiple of 32 the assert
// below can fire after clamping — confirm callers pass a multiple of 32.
text_region_width = text_region_height * ((text_region_width - 1) / text_region_height + 1);
text_region_width = ::min(text_region_width, text_region_max_width);
assert(text_region_width % 32 == 0);
// allocate space for output (3 channels per region, planar layout)
if (out_extracted_regions.empty() || out_extracted_regions.size() < num_text_regions * 3 * text_region_height * text_region_width)
out_extracted_regions.reallocate(num_text_regions * 3 * text_region_height * text_region_width);
// One 32x32 thread block per output tile, one z-slice per bbox.
dim3 block(32, 32, 1);
dim3 grid(text_region_width / 32, text_region_height / 32, num_text_regions);
//if (num_text_regions == 91)
// __debugbreak();
hipLaunchKernelGGL(( ExtractTextRegions_kernel), dim3(grid), dim3(block), 0, stream,
in_frames,
tmp_contiguous_bboxes_gpu,
tmp_contiguous_individual_map_gpu,
out_extracted_regions,
width,
height,
text_region_width,
text_region_height
);
ck2(hipGetLastError());
if (out_text_region_width)
*out_text_region_width = text_region_width;
}
| a5186ea685d1439f571627954083217e35821de2.cu | #pragma once
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "ExtractTextRegions.h"
// Stores one RGB pixel into a batched planar (channel-major) image buffer:
// layout is [batch][channel][y][x] with channel planes of width*height elements.
template<typename T>
__device__ __forceinline__ void write_pixel(T* rgb, std::int32_t batch, std::int32_t x, std::int32_t y, T r, T g, T b, std::int32_t width, std::int32_t height)
{
    std::int32_t const plane = width * height;
    T* base = rgb + plane * 3 * batch + width * y + x;
    base[plane * 0] = r;
    base[plane * 1] = g;
    base[plane * 2] = b;
}
// Loads one RGB pixel from a batched planar (channel-major) image buffer;
// layout mirrors write_pixel: [batch][channel][y][x].
__device__ __forceinline__ uchar3 read_pixel(std::uint8_t const* const rgb, std::int32_t batch, std::int32_t x, std::int32_t y, std::int32_t width, std::int32_t height)
{
    std::int32_t const plane = width * height;
    std::uint8_t const* base = rgb + plane * 3 * batch + width * y + x;
    return uchar3{ base[plane * 0], base[plane * 1], base[plane * 2] };
}
// Rescales each text bounding box into a fixed-size planar-RGB output slot
// (region_width x region_height) with nearest-neighbour sampling, preserving
// aspect ratio and zero-padding the leftover area (right, or top+bottom).
// Launch layout: blockIdx.z selects the bbox; x/y threads cover the output.
__global__ void ExtractTextRegions_kernel(
std::uint8_t const* const frames,
BBox const* const bboxes,
subtitle_index const* const bbox_frame_map,
std::uint8_t* regions,
std::int32_t frame_width,
std::int32_t frame_height,
std::int32_t region_width, // multiple of 32
std::int32_t region_height // 32
)
{
std::int32_t ix(blockIdx.x * blockDim.x + threadIdx.x), iy(blockIdx.y * blockDim.y + threadIdx.y), iz(blockIdx.z);
auto const& box(bboxes[iz]);
// Width the box would have when scaled to region_height at constant aspect ratio.
auto ratio(__int2float_rz(box.width) / __int2float_rz(box.height));
auto new_width(__float2int_rz(__int2float_rz(region_height) * ratio));
// Index of the source frame this bbox was detected in.
auto src_frame_idx(bbox_frame_map[iz].first);
if (new_width <= region_width)
{
// right pad zero
if (ix >= new_width)
{
// set zero
write_pixel(regions, iz, ix, iy, (std::uint8_t)0, (std::uint8_t)0, (std::uint8_t)0, region_width, region_height);
}
else
{
// nearest sample: normalized coords in the scaled box, mapped back into the bbox.
float2 uv{ __int2float_rz(ix) / __int2float_rz(new_width - 1), __int2float_rz(iy) / __int2float_rz(region_height - 1) };
int2 pos{ __float2int_rz(uv.x * __int2float_rz(box.width - 1) + 0.5f), __float2int_rz(uv.y * __int2float_rz(box.height - 1) + 0.5f) };
auto rgb(read_pixel(frames, src_frame_idx, pos.x + box.x, pos.y + box.y, frame_width, frame_height));
write_pixel(regions, iz, ix, iy, rgb.x, rgb.y, rgb.z, region_width, region_height);
}
}
else
{
// Box is wider than the slot even at full width: fit to region_width and
// pad top and bottom zero.
auto new_height(__float2int_rz(__int2float_rz(region_width) / ratio));
new_height += new_height & 1; // make multiple of 2
auto pad_height((region_height - new_height) / 2);
if (iy < pad_height || iy >= pad_height + new_height)
{
// set zero
write_pixel(regions, iz, ix, iy, (std::uint8_t)0, (std::uint8_t)0, (std::uint8_t)0, region_width, region_height);
}
else
{
// nearest sample
float2 uv{ __int2float_rz(ix) / __int2float_rz(region_width - 1), __int2float_rz(iy - pad_height) / __int2float_rz(new_height - 1) };
int2 pos{ __float2int_rz(uv.x * __int2float_rz(box.width - 1) + 0.5f), __float2int_rz(uv.y * __int2float_rz(box.height - 1) + 0.5f) };
auto rgb(read_pixel(frames, src_frame_idx, pos.x + box.x, pos.y + box.y, frame_width, frame_height));
write_pixel(regions, iz, ix, iy, rgb.x, rgb.y, rgb.z, region_width, region_height);
}
}
}
// Host driver for ExtractTextRegions_kernel (CUDA build).
// Uploads the bboxes and their bbox->frame map to scratch device buffers,
// picks one common output width (max aspect-preserved width over all boxes,
// rounded up to a multiple of text_region_height and clamped to
// text_region_max_width), sizes the output buffer, and launches the kernel
// on `stream`. The chosen width is reported via out_text_region_width.
// NOTE(review): launch and uploads are asynchronous on `stream`; caller is
// presumably responsible for synchronizing before reading the output.
void ExtractTextRegions(
cudawrapper::CUDADeviceMemoryUnique<std::uint8_t> const& in_frames,
cudawrapper::CUDADeviceMemoryUnique<std::uint8_t>& out_extracted_regions,
cudawrapper::CUDADeviceMemoryUnique<BBox>& tmp_contiguous_bboxes_gpu,
cudawrapper::CUDADeviceMemoryUnique<subtitle_index>& tmp_contiguous_individual_map_gpu,
std::vector<BBox> const& bboxes_contiguous,
std::vector<subtitle_index> contiguous_individual_map,
std::int32_t width,
std::int32_t height,
std::int32_t text_region_height, // this is 32
std::int32_t text_region_max_width,
std::int32_t *out_text_region_width,
CUstream stream
)
{
assert(text_region_height == 32);
std::size_t num_text_regions(bboxes_contiguous.size());
// allocate space for tmp (grow-only scratch buffers, reused across calls)
if (tmp_contiguous_bboxes_gpu.empty() || tmp_contiguous_bboxes_gpu.size() < num_text_regions)
tmp_contiguous_bboxes_gpu.reallocate(num_text_regions);
if (tmp_contiguous_individual_map_gpu.empty() || tmp_contiguous_individual_map_gpu.size() < num_text_regions)
tmp_contiguous_individual_map_gpu.reallocate(num_text_regions);
// upload bboxes
tmp_contiguous_bboxes_gpu.upload(bboxes_contiguous, stream);
tmp_contiguous_individual_map_gpu.upload(contiguous_individual_map, stream);
// find max region width over all boxes when scaled to text_region_height
std::int32_t text_region_width(-1);
for (auto const& box : bboxes_contiguous)
{
auto ratio(static_cast<float>(box.width) / static_cast<float>(box.height));
auto new_width(static_cast<std::int32_t>(static_cast<float>(text_region_height) * ratio));
text_region_width = std::max(text_region_width, new_width);
}
// Round up to a multiple of text_region_height, then clamp.
// NOTE(review): if text_region_max_width is not a multiple of 32 the assert
// below can fire after clamping — confirm callers pass a multiple of 32.
text_region_width = text_region_height * ((text_region_width - 1) / text_region_height + 1);
text_region_width = std::min(text_region_width, text_region_max_width);
assert(text_region_width % 32 == 0);
// allocate space for output (3 channels per region, planar layout)
if (out_extracted_regions.empty() || out_extracted_regions.size() < num_text_regions * 3 * text_region_height * text_region_width)
out_extracted_regions.reallocate(num_text_regions * 3 * text_region_height * text_region_width);
// One 32x32 thread block per output tile, one z-slice per bbox.
dim3 block(32, 32, 1);
dim3 grid(text_region_width / 32, text_region_height / 32, num_text_regions);
//if (num_text_regions == 91)
// __debugbreak();
ExtractTextRegions_kernel<<<grid, block, 0, stream>>>(
in_frames,
tmp_contiguous_bboxes_gpu,
tmp_contiguous_individual_map_gpu,
out_extracted_regions,
width,
height,
text_region_width,
text_region_height
);
ck2(cudaGetLastError());
if (out_text_region_width)
*out_text_region_width = text_region_width;
}
|
f2c6433e130606e7540e13eb61cbb73b2d5cf5ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "hip/hip_runtime.h"
//pass
//--blockDim=10 --gridDim=64 --no-inline
// Verifier test kernel: every thread writes 2 into its own shared-memory
// cell, then asserts the cell does NOT hold 2. NOTE(review): the assertion
// deliberately contradicts the preceding write — presumably this exercises
// the assertion checker (see the //pass directive above); confirm against
// the test harness before "fixing" it.
__global__ void foo() {
__shared__ int A[10][10];
A[threadIdx.y][threadIdx.x] = 2;
__assert(A[threadIdx.y][threadIdx.x]!=2);
}
| f2c6433e130606e7540e13eb61cbb73b2d5cf5ce.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuda.h"
//pass
//--blockDim=10 --gridDim=64 --no-inline
// Verifier test kernel: every thread writes 2 into its own shared-memory
// cell, then asserts the cell does NOT hold 2. NOTE(review): the assertion
// deliberately contradicts the preceding write — presumably this exercises
// the assertion checker (see the //pass directive above); confirm against
// the test harness before "fixing" it.
__global__ void foo() {
__shared__ int A[10][10];
A[threadIdx.y][threadIdx.x] = 2;
__assert(A[threadIdx.y][threadIdx.x]!=2);
}
|
934ec8f79ee2ecdadb2bba8dda00f04069111ed9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaLib.cu
First Version: 2012-07-01 V0.1
Current Version: V0.3
Last Updates: 2012-8-29
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef __PANDALIB_CU__
#define __PANDALIB_CU__
#include "Panda.h"
#include "stdlib.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//return: default spec
//----------------------------------------------
// Allocates a job_configuration with every byte zeroed and the documented
// defaults spelled out. Exits the process on allocation failure.
job_configuration *GetJobConf(){
    job_configuration *job_conf = (job_configuration *)calloc(1, sizeof(job_configuration));
    if (job_conf == NULL) exit(-1);
    // calloc already zeroed the struct; the assignments below just make the
    // defaults explicit for readers.
    job_conf->num_input_record = 0;
    job_conf->input_keyval_arr = NULL;
    job_conf->auto_tuning = false;
    job_conf->num_mappers = 0;
    job_conf->num_reducers = 0;
    job_conf->num_gpus = 0;
    job_conf->num_cpus_cores = 0;
    job_conf->num_cpus_groups = 0;
    return job_conf;
}//GetJobConf
// Allocates a gpu_context with every byte zeroed and the documented defaults
// spelled out. Exits the process on allocation failure.
gpu_context *GetGPUContext(){
    gpu_context *state = (gpu_context*)calloc(1, sizeof(gpu_context));
    if (state == NULL) exit(-1);
    // calloc already zeroed the struct; assignments kept for clarity.
    state->configured = false;
    state->h_input_keyval_arr = NULL;
    state->num_mappers = 0;
    state->num_reducers = 0;
    return state;
}//GetGPUContext
// Allocates a cpu_context with every byte zeroed and the documented defaults
// spelled out. Exits the process on allocation failure.
cpu_context *GetCPUContext(){
    cpu_context *state = (cpu_context*)calloc(1, sizeof(cpu_context));
    if (state == NULL) exit(-1);
    // calloc already zeroed the struct; assignments kept for clarity.
    state->configured = false;
    state->input_keyval_arr = NULL;
    return state;
}//GetCPUContext
// Allocates a panda_context and returns it with defaults applied.
// Fix: the sibling Get*Context helpers memset their structs to zero, but this
// one did not — any panda_context member not assigned below was left
// indeterminate. Zero the whole struct first so every field is well defined.
panda_context *GetPandaContext(){
    panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context));
    if (d_g_state == NULL) exit(-1);
    memset(d_g_state, 0, sizeof(panda_context));
    d_g_state->input_keyval_arr = NULL;
    d_g_state->intermediate_keyval_arr_arr_p = NULL;
    d_g_state->sorted_intermediate_keyvals_arr = NULL;
    d_g_state->sorted_keyvals_arr_len = 0;
    d_g_state->num_gpus = 0;
    d_g_state->gpu_context = NULL;
    d_g_state->num_cpus_groups = 0;
    d_g_state->cpu_context = NULL;
    return d_g_state;
}//GetPandaContext
//For version 0.3
// Prepares one CPU worker group for the map phase (runtime version 0.3):
// validates the job configuration, allocates one pthread handle + task-info
// slot per CPU core, and one (zeroed) intermediate key/value array per input
// record. Exits the process on invalid input.
void InitCPUMapReduce2(thread_info_t * thread_info){
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
job_configuration *job_conf = (job_configuration *)(thread_info->job_conf);
// Sanity checks: a configured job and at least one CPU core are required.
if (job_conf->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
if (job_conf->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
//DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
// Total key/value byte counts, reported for diagnostics only.
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<job_conf->num_input_record;i++){
totalKeySize += job_conf->input_keyval_arr[i].keySize;
totalValSize += job_conf->input_keyval_arr[i].valSize;
}//for
DoLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d num_cpus:%d",
d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize, totalValSize, d_g_state->num_cpus_cores);
//TODO determin num_cpus
//d_g_state->num_cpus = 12;
int num_cpus_cores = d_g_state->num_cpus_cores;
// One worker thread and one task descriptor per core; one intermediate
// key/value array per input record (zeroed).
d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record);
// Task ranges start empty; the scheduler fills start/end indices later.
for (int i=0;i<num_cpus_cores;i++){
d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf;
d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_id_idx = 0;
d_g_state->panda_cpu_task_info[i].end_idx = 0;
}//for
d_g_state->configured = true;
DoLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id);
}
//For Version 0.2 depressed
// Legacy (version 0.2) CPU map-phase setup; whole body is compiled only under
// DEV_MODE and reads the inputs from cpu_context directly instead of a
// job_configuration. Kept for reference — InitCPUMapReduce2 supersedes it.
void InitCPUMapReduce(cpu_context* d_g_state)
{
#ifdef DEV_MODE
// Sanity checks mirror InitCPUMapReduce2.
if (d_g_state->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
if (d_g_state->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
//DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
DoLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);
// Total key/value byte counts, reported for diagnostics only.
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->input_keyval_arr[i].keySize;
totalValSize += d_g_state->input_keyval_arr[i].valSize;
}//for
DoLog("totalKeySize:%d totalValSize:%d num_cpus:%d", totalKeySize, totalValSize, d_g_state->num_cpus_cores);
int num_cpus_cores = d_g_state->num_cpus_cores;
// One worker thread and task descriptor per core; one zeroed intermediate
// key/value array per input record.
d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*d_g_state->num_input_record);
for (int i=0;i<num_cpus_cores;i++){
d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_id_idx = 0;
d_g_state->panda_cpu_task_info[i].end_idx = 0;
}//for
d_g_state->configured = true;
DoLog("DONE");
#endif
}//void
//For Version 0.3 test depressed
// DEV_MODE-only (version 0.3 test) GPU input staging: packs the host input
// key/value records referenced by thread_info->job_conf into contiguous key
// and value buffers plus a position table, then copies all three to the GPU.
// NOTE(review): the three host staging buffers are never freed here — they
// leak on every call, same as InitGPUMapReduce3 before its fix.
void InitGPUMapReduce4(thread_info_t* thread_info)
{
#ifdef DEV_MODE
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf);
keyval_t * kv_p = gpu_job_conf->input_keyval_arr;
DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
DoLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record);
//checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
// Measure total bytes needed for the packed key and value buffers.
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<gpu_job_conf->num_input_record;i++){
totalKeySize += kv_p[i].keySize;
totalValSize += kv_p[i].valSize;
}//for
DoLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize);
// Host staging buffers: packed values, packed keys, and per-record offsets.
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
// Pack every record back-to-back and record its offsets/sizes.
for(int i=0; i<gpu_job_conf->num_input_record; i++){
keySize = kv_p[i].keySize;
valSize = kv_p[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
// Allocate device copies and upload all three staging buffers.
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice));
hipDeviceSynchronize();
d_g_state->configured = true;
//(thread_info->d_g_state) = d_g_state;
#endif
}//void
// Stages the host-side input key/value records for the GPU map phase:
// packs keys and values into two contiguous host buffers plus a per-record
// position table, uploads all three to the device, then marks the context
// configured.
// Fix: the three host staging buffers were malloc'd on every call and never
// freed — a leak that grows with each iteration of an iterative application.
// They are released once the device copies are complete.
void InitGPUMapReduce3(gpu_context* d_g_state)
{
    DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
    //if (d_g_state->configured)
    // return;
    //checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
    // Measure total bytes needed for the packed key and value buffers.
    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=0;i<d_g_state->num_input_record;i++){
        totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
        totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
    }//for
    DoLog("copy %d input records from Host to GPU memory totalKeySize:%d totalValSize:%d",d_g_state->num_input_record, totalKeySize, totalValSize);
    // Host staging buffers: packed values, packed keys, per-record offsets.
    void *input_vals_shared_buff = malloc(totalValSize);
    void *input_keys_shared_buff = malloc(totalKeySize);
    keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
    int keyPos = 0;
    int valPos = 0;
    int keySize = 0;
    int valSize = 0;
    // Pack every record back-to-back and record its offsets/sizes.
    for(int i=0;i<d_g_state->num_input_record;i++){
        keySize = d_g_state->h_input_keyval_arr[i].keySize;
        valSize = d_g_state->h_input_keyval_arr[i].valSize;
        memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
        memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
        input_keyval_pos_arr[i].keySize = keySize;
        input_keyval_pos_arr[i].keyPos = keyPos;
        input_keyval_pos_arr[i].valPos = valPos;
        input_keyval_pos_arr[i].valSize = valSize;
        keyPos += keySize;
        valPos += valSize;
    }//for
    // Allocate device copies and upload all three staging buffers.
    checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
    checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
    checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
    checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice));
    //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice));
    hipDeviceSynchronize();
    // The synchronous copies above have completed; the host staging buffers
    // are no longer needed (leak fix).
    free(input_keyval_pos_arr);
    free(input_keys_shared_buff);
    free(input_vals_shared_buff);
    d_g_state->configured = true;
}//void
#ifdef DEV_MODE
// Legacy (version 0.2) GPU input staging: identical packing/upload scheme to
// InitGPUMapReduce3 but reads records from the gpu_context directly and does
// not set the configured flag. Compiled only under DEV_MODE.
// NOTE(review): the three host staging buffers are never freed — leak on
// every call.
void InitGPUMapReduce2(gpu_context* d_g_state)
{
DoLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);
//checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
// Measure total bytes needed for the packed key and value buffers.
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
}//for
// Host staging buffers: packed values, packed keys, per-record offsets.
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
// Pack every record back-to-back and record its offsets/sizes.
for(int i=0;i<d_g_state->num_input_record;i++){
keySize = d_g_state->h_input_keyval_arr[i].keySize;
valSize = d_g_state->h_input_keyval_arr[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
// Allocate device copies and upload all three staging buffers.
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice));
hipDeviceSynchronize();
}//void
#endif
// Initializes the CPU side of a worker thread: fills in the core count if the
// caller did not set one. Fix: corrected the typo "Deivce" -> "Device" in the
// log message.
void InitCPUDevice(thread_info_t*thread_info){
    //------------------------------------------
    //1, init CPU device
    //------------------------------------------
    cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
    // Auto-detect the core count when unset (<= 0).
    if (d_g_state->num_cpus_cores<=0) d_g_state->num_cpus_cores = getCPUCoresNum();
    int tid = thread_info->tid;
    DoLog( "Init CPU Device tid:%d",tid);
    //char *fn = thread_info->file_name;
    //"% num_gpus" allows more CPU threads than GPU devices
}
// Binds the calling worker thread to a GPU (tid modulo num_gpus), records the
// selected device id in the context, and raises the device malloc heap limit
// to 20% of total device memory.
// Fixes: (1) the heap limit was computed as (int)(total_mem*0.2), which
// truncates through a 32-bit int and overflows for devices with more than
// ~10 GB of memory — hipDeviceSetLimit takes a size_t, so cast to size_t;
// (2) size_t values were passed to DoLog's %d conversions, which is undefined
// on LP64 platforms — the MB figures are now cast to int explicitly.
void InitGPUDevice(thread_info_t*thread_info){
    //------------------------------------------
    //1, init device
    //------------------------------------------
    gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
    int tid = thread_info->tid;
    int num_gpus = d_g_state->num_gpus;
    if (num_gpus == 0) {
        DoLog("error num_gpus == 0");
        exit(-1);
    }
    int gpu_id;
    hipGetDevice(&gpu_id);
    int gpu_count = 0;
    hipGetDeviceCount(&gpu_count);
    DoLog("check GPU Device IDs -> tid:%d current gpu_id:%d num_gpus by user:%d hipGetDeviceCount:%d", tid, gpu_id, num_gpus, gpu_count);
    // "% num_gpus" allows more worker threads than GPU devices.
    if ( gpu_id != tid ){
        hipSetDevice(tid % num_gpus);
    }//if
    hipGetDevice(&gpu_id);
    d_g_state->gpu_id = gpu_id;
    size_t total_mem,avail_mem, heap_limit;
    checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem ));
    // size_t, not int: 20% of total memory exceeds INT_MAX on large GPUs.
    hipDeviceSetLimit(hipLimitMallocHeapSize, (size_t)(total_mem*0.2));
    hipDeviceGetLimit(&heap_limit, hipLimitMallocHeapSize);
    DoLog("TID:[%d] num_gpus:%d gpu_id:%d ",tid,num_gpus,gpu_id);
    DoLog("GPU_ID:[%d] hipLimitMallocHeapSize:%d MB avail_mem:%d MB total_mem:%d MB",gpu_id, (int)(heap_limit/1024/1024), (int)(avail_mem/1024/1024), (int)(total_mem/1024/1024));
}
//Ratio = Tcpu/Tgpu
//Tcpu = (execution time on CPU cores for sampled tasks)/(#sampled tasks)
//Tgpu = (execution time on 1 GPU for sampled tasks)/(#sampled tasks)
float Smart_Scheduler(job_configuration *job_conf){
#ifdef DEV_MODE
	// Sample a few map tasks on one GPU and one CPU worker group, time both
	// runs with PandaTimer(), and log the measured CPU/GPU speed ratio.
	// NOTE(review): the measured ratio is only logged; the function always
	// returns 1.0 — confirm whether callers expect the real ratio returned.
	int num_gpus = 1;//job_conf->num_gpus;
	int num_cpus_cores = getCPUCoresNum();//job_conf->num_cpus;
	if (num_cpus_cores > 2)
		num_cpus_cores -= 2;	// keep two cores free for the host side
	int num_cpus_group = 1;//job_conf->num_cpus_groups;
	// kept even though the result is unused here: the getter may lazily
	// initialize the global panda context as a side effect
	panda_context *panda = GetPandaContext();
	(void)panda;
	thread_info_t *thread_info = (thread_info_t*)malloc(sizeof(thread_info_t)*(num_gpus + num_cpus_group));
	// one worker descriptor per GPU device
	for (int i = 0; i < num_gpus; i++){
		thread_info[i].tid = i;
		thread_info[i].num_gpus = num_gpus;
		thread_info[i].device_type = GPU_ACC;
		hipDeviceProp_t gpu_dev;
		hipGetDeviceProperties(&gpu_dev, i);
		// NOTE(review): if device_name is a raw char*, this stores a pointer
		// into the stack-local gpu_dev — confirm thread_info_t's type.
		thread_info[i].device_name = gpu_dev.name;
		gpu_context *d_g_state = GetGPUContext();
		d_g_state->num_mappers = job_conf->num_mappers;
		d_g_state->num_reducers = job_conf->num_reducers;
		thread_info[i].d_g_state = d_g_state;
	}//for num_gpus
	// one worker descriptor per CPU group
	for (int i = num_gpus; i < num_gpus + num_cpus_group; i++){
		thread_info[i].tid = i;
		thread_info[i].device_type = CPU_ACC;
		cpu_context *d_g_state = GetCPUContext();
		d_g_state->num_cpus_cores = num_cpus_cores;
		thread_info[i].d_g_state = d_g_state;
	}//for
	// cast sizeof to int: size_t passed through "..." for %d is undefined on LP64
	DoLog("num_gpus:%d num_cpus_group:%d num_input_record:%d sizeof(int):%d\n", num_gpus, num_cpus_group, job_conf->num_input_record, (int)sizeof(int));
	// sample size scales with core count, clamped to the available input
	int cpu_sampled_tasks_num = num_cpus_cores * job_conf->auto_tuning_sample_rate;
	int gpu_sampled_tasks_num = getGPUCoresNum() * job_conf->auto_tuning_sample_rate;
	if (cpu_sampled_tasks_num > job_conf->num_input_record)
		cpu_sampled_tasks_num = job_conf->num_input_record;
	if (gpu_sampled_tasks_num > job_conf->num_input_record)
		gpu_sampled_tasks_num = job_conf->num_input_record;
	// hand the sampled record range [0, end_id) to each device context
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		int start_row_id_id = 0;
		int end_id = 0;
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			end_id = start_row_id_id + gpu_sampled_tasks_num;
			AddMapInputRecordGPU(d_g_state, (job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			end_id = start_row_id_id + cpu_sampled_tasks_num;
			AddMapInputRecordCPU(d_g_state, (job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
	}//for
	// time the GPU sample (thread_info[0]) then the CPU sample (thread_info[1])
	double t1 = PandaTimer();
	Panda_Map((void *)&(thread_info[0]));
	double t2 = PandaTimer();
	Panda_Map((void *)&(thread_info[1]));
	double t3 = PandaTimer();
	double t_cpu = (t3 - t2);
	double t_gpu = (t2 - t1);
	if (t_gpu < 0.0001)
		t_gpu = 0.0001;	// avoid division by ~zero for very fast GPU runs
	// ratio = (CPU time per task) / (GPU time per task)
	double ratio = (t_cpu * gpu_sampled_tasks_num) / (t_gpu * cpu_sampled_tasks_num);
	char log[128];
	// snprintf instead of sprintf: the formatted text can approach the buffer size
	snprintf(log, sizeof(log), " cpu_sampled_tasks:%d cpu time:%f cpu time per task:%f", cpu_sampled_tasks_num, t_cpu, t_cpu/(cpu_sampled_tasks_num));
	DoDiskLog(log);
	snprintf(log, sizeof(log), " gpu_sampled_tasks:%d gpu time:%f gpu time per task:%f ratio:%f", gpu_sampled_tasks_num, t_gpu, t_gpu/(gpu_sampled_tasks_num), ratio);
	DoDiskLog(log);
	free(thread_info);	// fix: was leaked (unused no_threads allocation removed)
#endif
	return (1.0);
}//void
//--------------------------------------------------
// PandaMetaScheduler
//--------------------------------------------------
/*
* 1) input a set of panda worker (thread)
* 2) each panda worker consist of one panda job and pand device
* 3) copy input data from pand job to pand device
*/
void PandaMetaScheduler(thread_info_t *thread_info, int num_gpus, int num_cpus_groups){
	// Top-level job driver: bind each worker to a GPU or CPU context, feed every
	// device its job's full input, run the map phase on all devices in parallel,
	// shuffle/merge the per-device intermediate output into the shared panda
	// context, statically split the sorted records for reduce (GPUs weighted 10x),
	// run reduce in parallel, and report the total output record count.
	panda_context *panda = GetPandaContext();
	panda->num_gpus = num_gpus;
	panda->num_cpus_groups = num_cpus_groups;
	pthread_t *no_threads = (pthread_t*)malloc(sizeof(pthread_t)*(num_gpus + num_cpus_groups));
	// 1) create a device context for every worker
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			job_configuration* gpu_job_conf = (job_configuration*)(thread_info[dev_id].job_conf);
			gpu_context *d_g_state = GetGPUContext();
			d_g_state->num_mappers = gpu_job_conf->num_mappers;
			d_g_state->num_reducers = gpu_job_conf->num_reducers;
			d_g_state->num_gpus = num_gpus;
			thread_info[dev_id].tid = dev_id;
			thread_info[dev_id].d_g_state = d_g_state;
			DoLog("DEV_ID:[%d] GPU_ACC TID:%d",dev_id,thread_info[dev_id].tid);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context *d_g_state = GetCPUContext();
			thread_info[dev_id].tid = dev_id;
			thread_info[dev_id].d_g_state = d_g_state;
			DoLog("DEV_ID:[%d] CPU_ACC TID:%d",dev_id,thread_info[dev_id].tid);
		}//if
	}//for
	// 2) every device receives its own job's whole input range [0, num_input_record)
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			job_configuration *gpu_job_conf = (job_configuration *)(thread_info[dev_id].job_conf);
			int start_row_id_id = 0;
			int end_id = gpu_job_conf->num_input_record;
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordGPU(d_g_state, (gpu_job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			job_configuration *cpu_job_conf = (job_configuration *)(thread_info[dev_id].job_conf);
			int start_row_id_id = 0;
			int end_id = cpu_job_conf->num_input_record;
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordCPU(d_g_state, (cpu_job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
	}//for
	// 3) run map on all devices concurrently, one pthread per device
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Map, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i = 0; i < num_gpus + num_cpus_groups; i++){
		void *exitstat;
		if (pthread_join(no_threads[i], &exitstat) != 0) perror("joining failed");
	}//for
	// 4) merge per-device intermediate results into the shared panda context
	for (int i = 0; i < num_gpus + num_cpus_groups; i++){
		if (thread_info[i].device_type == CPU_ACC)
			Panda_Shuffle_Merge_CPU((panda_context*)panda, (cpu_context*)(thread_info[i].d_g_state));
		if (thread_info[i].device_type == GPU_ACC)
			Panda_Shuffle_Merge_GPU((panda_context*)panda, (gpu_context*)(thread_info[i].d_g_state));
	}//for
	// 5) static reduce split: each GPU gets a 10x weighted share of the sorted
	// records; the last device absorbs the rounding remainder
	//TODO reduce task ratio
	int num_sorted_intermediate_record = panda->sorted_keyvals_arr_len;
	int records_per_device = num_sorted_intermediate_record/(num_gpus*10 + num_cpus_groups);
	int *split = (int*)malloc(sizeof(int)*(num_gpus + num_cpus_groups));
	for (int i = 0; i < num_gpus; i++){
		split[i] = records_per_device*10*(i+1);
	}//for
	for (int i = num_gpus; i < num_gpus + num_cpus_groups; i++){
		split[i] = records_per_device*10*(num_gpus) + (i+1)*records_per_device;
	}//for
	split[num_gpus + num_cpus_groups - 1] = num_sorted_intermediate_record;
	// hand each device its reduce input slice [split[dev-1], split[dev])
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		int start_row_id_id = 0;
		if (dev_id > 0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordGPU(d_g_state, (panda->sorted_intermediate_keyvals_arr), start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordCPU(d_g_state, (panda->sorted_intermediate_keyvals_arr), start_row_id_id, end_id);
		}//if
	}//for
	// 6) run reduce on all devices concurrently
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Reduce, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i = 0; i < num_gpus + num_cpus_groups; i++){
		void *exitstat;
		if (pthread_join(no_threads[i], &exitstat) != 0) perror("joining failed");
	}//for
	// 7) tally output records across all device contexts
	int total_output_records = 0;
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->d_reduced_keyval_arr_len;
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->sorted_keyvals_arr_len;
		}//if
	}//for
	free(split);		// fix: was leaked
	free(no_threads);	// fix: was leaked
	DoLog("number of reduce output:%d\n",total_output_records);
	DoLog("=====panda mapreduce job finished=====");
}//PandaMetaScheduler
void Start_Panda_Job(job_configuration *job_conf){
#ifdef DEV_MODE
	// DEV_MODE job driver sharing one job_configuration across all devices:
	// build worker descriptors, optionally auto-tune the CPU/GPU split ratio,
	// split the map input by ratio (GPUs weighted), run map, shuffle/merge,
	// split again for reduce (GPUs weighted 10x), run reduce, report counts.
	int num_gpus = job_conf->num_gpus;
	int num_cpus_cores = job_conf->num_cpus_cores;
	int num_cpus_group = job_conf->num_cpus_groups;
	panda_context *panda = GetPandaContext();
	panda->num_gpus = num_gpus;
	panda->num_cpus_groups = num_cpus_group;
	DoLog("Start num_gpus:%d num_cpus_groups:%d", num_gpus, num_cpus_group);
	pthread_t *no_threads = (pthread_t*)malloc(sizeof(pthread_t)*(num_gpus + num_cpus_group));
	thread_info_t *thread_info = (thread_info_t*)malloc(sizeof(thread_info_t)*(num_gpus + num_cpus_group));
	// one worker descriptor + GPU context per device
	for (int i = 0; i < num_gpus; i++){
		thread_info[i].tid = i;
		thread_info[i].num_gpus = num_gpus;
		thread_info[i].device_type = GPU_ACC;
		hipDeviceProp_t gpu_dev;
		hipGetDeviceProperties(&gpu_dev, i);
		// NOTE(review): if device_name is a raw char*, this stores a pointer
		// into the stack-local gpu_dev — confirm thread_info_t's type.
		thread_info[i].device_name = gpu_dev.name;
		gpu_context *d_g_state = GetGPUContext();
		d_g_state->matrix_size = job_conf->matrix_size;
		d_g_state->num_mappers = job_conf->num_mappers;
		d_g_state->num_reducers = job_conf->num_reducers;
		thread_info[i].d_g_state = d_g_state;
	}//for num_gpus
	// one worker descriptor + CPU context per CPU group
	for (int i = num_gpus; i < num_gpus + num_cpus_group; i++){
		thread_info[i].tid = i;
		thread_info[i].device_type = CPU_ACC;
		cpu_context *d_g_state = GetCPUContext();
		d_g_state->num_cpus_cores = num_cpus_cores;
		thread_info[i].d_g_state = d_g_state;
	}//for
	// ratio = Tcpu/Tgpu; either configured by the user or measured by sampling
	double ratio = 10.0;
	ratio = (double)(job_conf->ratio);
	if (job_conf->auto_tuning){
		ratio = (Smart_Scheduler(job_conf));
		job_conf->ratio = ratio;
	}//if
	// cast sizeof to int: size_t passed through "..." for %d is undefined on LP64
	DoLog("num_gpus:%d num_cpus_group:%d num_input_record:%d sizeof(int):%d ratio:%f\n", num_gpus, num_cpus_group, job_conf->num_input_record, (int)sizeof(int), ratio);
	// static map split: split[i] is the exclusive end index for device i;
	// GPU devices receive a ratio-weighted share, the last device takes the remainder
	int *split = (int *)malloc(sizeof(int)*(num_gpus + num_cpus_group));
	int num_input_record = job_conf->num_input_record;
	int records_per_device = (int)(num_input_record/(num_gpus*ratio + num_cpus_group));
	for (int i = 0; i < num_gpus; i++){
		split[i] = (int)(records_per_device*ratio*(i+1));
	}//for
	for (int i = num_gpus; i < num_gpus + num_cpus_group; i++){
		split[i] = (int)(records_per_device*ratio*(num_gpus) + (i+1)*records_per_device);
	}//for
	split[num_gpus + num_cpus_group - 1] = num_input_record;
	printf("--- split:num_input_record:%d records_per_device:%d ",num_input_record, records_per_device);
	for (int i = 0; i < num_gpus + num_cpus_group; i++)
		printf("%d\t",split[i]);
	printf("\n");
	// hand each device its map input slice [split[dev-1], split[dev])
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		int start_row_id_id = 0;
		if (dev_id > 0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordGPU(d_g_state, (job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordCPU(d_g_state, (job_conf->input_keyval_arr), start_row_id_id, end_id);
		}//if
	}//for
	// run map on all devices concurrently
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Map, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i = 0; i < num_gpus + num_cpus_group; i++){
		void *exitstat;
		if (pthread_join(no_threads[i], &exitstat) != 0) perror("joining failed");
	}//for
	DoLog("start_row_id to merge!");
	// merge per-device intermediate results into the shared panda context
	for (int i = 0; i < num_gpus; i++){
		Panda_Shuffle_Merge_GPU((panda_context*)panda, (gpu_context*)(thread_info[i].d_g_state));
	}//for
	for (int i = num_gpus; i < num_gpus + num_cpus_group; i++){
		Panda_Shuffle_Merge_CPU((panda_context*)panda, (cpu_context*)(thread_info[i].d_g_state));
	}//for
	DoLog("totoal number of different intermediate records:%d",panda->sorted_keyvals_arr_len);
	// static reduce split: GPUs weighted 10x; last device takes the remainder
	int num_sorted_intermediate_record = panda->sorted_keyvals_arr_len;
	records_per_device = num_sorted_intermediate_record/(num_gpus*10 + num_cpus_group);
	for (int i = 0; i < num_gpus; i++){
		split[i] = records_per_device*10*(i+1);
	}//for
	for (int i = num_gpus; i < num_gpus + num_cpus_group; i++){
		split[i] = records_per_device*10*(num_gpus) + (i+1)*records_per_device;
	}//for
	split[num_gpus + num_cpus_group - 1] = num_sorted_intermediate_record;
	// hand each device its reduce input slice [split[dev-1], split[dev])
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		int start_row_id_id = 0;
		if (dev_id > 0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordGPU(d_g_state, (panda->sorted_intermediate_keyvals_arr), start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordCPU(d_g_state, (panda->sorted_intermediate_keyvals_arr), start_row_id_id, end_id);
		}//if
	}//for
	// run reduce on all devices concurrently
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Reduce, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i = 0; i < num_gpus + num_cpus_group; i++){
		void *exitstat;
		if (pthread_join(no_threads[i], &exitstat) != 0) perror("joining failed");
	}//for
	DoLog("Finishing Panda MapReduce Job...");
	// tally output records across all device contexts
	int total_output_records = 0;
	for (int dev_id = 0; dev_id < (num_gpus + num_cpus_group); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->d_reduced_keyval_arr_len;
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->sorted_keyvals_arr_len;
		}//if
	}//for
	DoLog("there are :%d output records\n",total_output_records);
	DoLog("=====finish map/reduce=====");
	free(split);		// fix: was leaked
	free(no_threads);	// fix: was leaked
	free(thread_info);	// fix: was leaked
#endif
}
void AddPandaTask(job_configuration* job_conf,
		void* key,
		void* val,
		int keySize,
		int valSize){
	// Append one (key,val) input record to the job configuration, taking
	// deep copies of both buffers (caller keeps ownership of its pointers).
	int len = job_conf->num_input_record;
	if (len < 0) return;	// defensive: corrupted record count
	if (len == 0) job_conf->input_keyval_arr = NULL;
	// fix: check realloc/malloc results — the original wrote through
	// unchecked pointers on out-of-memory
	keyval_t *arr = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1));
	if (arr == NULL) { DoLog("error: out of memory in AddPandaTask"); return; }
	job_conf->input_keyval_arr = arr;
	arr[len].keySize = keySize;
	arr[len].valSize = valSize;
	arr[len].key = malloc(keySize);
	arr[len].val = malloc(valSize);
	if (arr[len].key == NULL || arr[len].val == NULL){
		free(arr[len].key);
		free(arr[len].val);
		DoLog("error: out of memory in AddPandaTask");
		return;
	}
	memcpy(arr[len].key, key, keySize);
	memcpy(arr[len].val, val, valSize);
	job_conf->num_input_record++;
}
void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_idi, int endi){
	// Flatten the sorted intermediate records [start_row_idi, endi) into three
	// contiguous buffers (keys, vals, keyval_pos descriptors) and upload them
	// to the device for the reduce phase.
	// total_count = total number of (key,val) pairs across all keys in range
	long total_count = 0;
	for (int i = start_row_idi; i < endi; i++){
		total_count += sorted_intermediate_keyvals_arr[i].val_arr_len;
	}//for
	int totalKeySize = 0;
	int totalValSize = 0;
	for (int i = start_row_idi; i < endi; i++){
		totalKeySize += sorted_intermediate_keyvals_arr[i].keySize;
		for (int j = 0; j < sorted_intermediate_keyvals_arr[i].val_arr_len; j++)
			totalValSize += sorted_intermediate_keyvals_arr[i].vals[j].valSize;
	}//for
	DoLog("totalKeySize:%d totalValSize:%d ",totalKeySize,totalValSize);
	checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
	checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
	checkCudaErrors(hipMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
	// host staging buffers; these stay owned by d_g_state after this call
	d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
	d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);
	char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
	char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;
	// fix: removed the unused "keyval_pos_arr" malloc — it was never read,
	// written, or freed (a pure leak of sizeof(keyval_pos_t)*total_count bytes)
	int sorted_key_arr_len = (endi - start_row_idi);
	keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
	DoLog("total number of different intermediate records:%d total records:%d", endi-start_row_idi, total_count);
	// pos_arr_4_pos_arr[k] = exclusive end index (into tmp_keyval_pos_arr) of key k's values
	int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len));
	memset(pos_arr_4_pos_arr, 0, sizeof(int)*sorted_key_arr_len);
	int index = 0;
	int keyPos = 0;
	int valPos = 0;
	for (int i = start_row_idi; i < endi; i++){
		keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]);
		memcpy(sorted_keys_shared_buff + keyPos, p->key, p->keySize);
		// every value of this key shares the same key position/size
		for (int j = 0; j < p->val_arr_len; j++){
			tmp_keyval_pos_arr[index].keyPos = keyPos;
			tmp_keyval_pos_arr[index].keySize = p->keySize;
			tmp_keyval_pos_arr[index].valPos = valPos;
			tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize;
			memcpy(sorted_vals_shared_buff + valPos, p->vals[j].val, p->vals[j].valSize);
			valPos += p->vals[j].valSize;
			index++;
		}//for
		keyPos += p->keySize;
		pos_arr_4_pos_arr[i - start_row_idi] = index;
	}//
	d_g_state->d_sorted_keyvals_arr_len = endi - start_row_idi;
	checkCudaErrors(hipMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,hipMemcpyHostToDevice));
	checkCudaErrors(hipMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len));
	checkCudaErrors(hipMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,hipMemcpyHostToDevice));
	// fix: free host temporaries after upload — both were leaked before
	free(tmp_keyval_pos_arr);
	free(pos_arr_4_pos_arr);
}
void AddMapInputRecordGPU(gpu_context* d_g_state,
		keyval_t *kv_p, int start_row_id_id, int end_id){
	// Append records kv_p[start_row_id_id, end_id) to the GPU context's host-side
	// input list. Copies are shallow (pointers only): the caller keeps ownership
	// of the underlying key/val buffers.
	int len = d_g_state->num_input_record;
	if (len < 0) return;			// defensive: corrupted record count
	if (end_id <= start_row_id_id) return;	// empty range, nothing to do
	if (len == 0) d_g_state->h_input_keyval_arr = NULL;
	DoLog("GPU_ID:[%d] add map input record for gpu context current #input:%d added #input:%d",d_g_state->gpu_id, len, end_id - start_row_id_id);
	// fix: check realloc result — the original wrote through an unchecked pointer
	keyval_t *arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + end_id - start_row_id_id));
	if (arr == NULL) { DoLog("error: out of memory in AddMapInputRecordGPU"); return; }
	d_g_state->h_input_keyval_arr = arr;
	for (int i = start_row_id_id; i < end_id; i++){
		arr[len].keySize = kv_p[i].keySize;
		arr[len].valSize = kv_p[i].valSize;
		arr[len].key = kv_p[i].key;	// shallow copy
		arr[len].val = kv_p[i].val;	// shallow copy
		d_g_state->num_input_record++;
		len++;
	}
}
void AddMapInputRecordCPU(cpu_context* d_g_state,
		keyval_t *kv_p, int start_row_idi, int endi){
	// Append records kv_p[start_row_idi, endi) to the CPU context's input list.
	// Copies are shallow (pointers only): the caller keeps ownership of the
	// underlying key/val buffers.
	int len = d_g_state->num_input_record;
	if (len < 0) return;			// defensive: corrupted record count
	if (endi <= start_row_idi) return;	// fix: empty-range guard (the GPU twin has it)
	if (len == 0) d_g_state->input_keyval_arr = NULL;
	DoLog("CPU_GROUP_ID:[%d] add map input record for cpu context current #input:%d added #input:%d",d_g_state->cpu_group_id,len,endi-start_row_idi);
	// fix: check realloc result — the original wrote through an unchecked pointer
	keyval_t *arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len + endi - start_row_idi));
	if (arr == NULL) { DoLog("error: out of memory in AddMapInputRecordCPU"); return; }
	d_g_state->input_keyval_arr = arr;
	for (int i = start_row_idi; i < endi; i++){
		arr[len].keySize = kv_p[i].keySize;
		arr[len].valSize = kv_p[i].valSize;
		arr[len].key = kv_p[i].key;	// shallow copy
		arr[len].val = kv_p[i].val;	// shallow copy
		d_g_state->num_input_record++;
		len++;
	}
}
void AddReduceInputRecordCPU(cpu_context* d_g_state,
		keyvals_t *kv_p, int start_row_id_id, int end_id){
	// Append the sorted intermediate records kv_p[start_row_id_id, end_id) to
	// the CPU context's reduce input list. Copies are shallow (pointers only).
	if (end_id < start_row_id_id){ DoLog("error ! end_id <= start_row_id_id"); end_id = start_row_id_id; }
	int len = d_g_state->sorted_keyvals_arr_len;
	if (len < 0) { DoLog("error ! len<0"); return; }
	if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL;
	// fix: check realloc result — the original wrote through an unchecked pointer
	keyvals_t *arr = (keyvals_t *)realloc(d_g_state->sorted_intermediate_keyvals_arr,
		sizeof(keyvals_t)*(len + end_id - start_row_id_id));
	if (arr == NULL) { DoLog("error: out of memory in AddReduceInputRecordCPU"); return; }
	d_g_state->sorted_intermediate_keyvals_arr = arr;
	// fix: removed the unused local "test" that shadowed the keySize read
	for (int i = len; i < len + end_id - start_row_id_id; i++){
		int src = start_row_id_id + i - len;	// index into kv_p for slot i
		arr[i].keySize = kv_p[src].keySize;
		arr[i].key = kv_p[src].key;
		arr[i].vals = kv_p[src].vals;
		arr[i].val_arr_len = kv_p[src].val_arr_len;
		d_g_state->sorted_keyvals_arr_len++;
	}//for
}
__device__ void Emit2 (void* key,
		void* val,
		int keySize,
		int valSize,
		gpu_context *d_g_state){
	// Store one reduce output record for this thread (slot TID), deep-copying
	// key and val into device-heap buffers owned by d_reduced_keyval_arr.
	keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
	p->keySize = keySize;
	// device-heap allocations, bounded by the hipLimitMallocHeapSize set at init
	p->key = malloc(keySize);
	p->valSize = valSize;
	p->val = malloc(valSize);
	// fix: device malloc can return NULL; the original memcpy'd unchecked
	if (p->key == NULL || p->val == NULL){
		printf("Emit2: device malloc failed keySize:%d valSize:%d\n", keySize, valSize);
		return;
	}
	memcpy(p->key,key,keySize);
	memcpy(p->val,val,valSize);
	// fix: cast to char* — passing void* for %s is undefined behavior
	printf("[output]: key:%s val:%d\n",(char *)key,*(int *)val);
}//__device__
void CPUEmitIntermediate(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
	// Append one intermediate (key,val) pair to the per-map-task array in the
	// CPU context, deep-copying both buffers.
	keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
	if (kv_arr_p->arr_len == 0)
		kv_arr_p->arr = NULL;	// first emit for this task: start a fresh array
	kv_arr_p->arr = (keyval_t*)realloc(kv_arr_p->arr, sizeof(keyval_t)*(kv_arr_p->arr_len+1));
	int current_map_output_index = (kv_arr_p->arr_len);
	keyval_t *kv_p = &(kv_arr_p->arr[current_map_output_index]);
	// BUG FIX: original called malloc(sizeof(keySize)) / malloc(sizeof(valSize)),
	// which allocates a fixed sizeof(int) bytes — any key or value larger than
	// sizeof(int) made the memcpy below overflow the heap buffer.
	kv_p->key = (char *)malloc(keySize);
	memcpy(kv_p->key,key,keySize);
	kv_p->keySize = keySize;
	kv_p->val = (char *)malloc(valSize);
	memcpy(kv_p->val,val,valSize);
	kv_p->valSize = valSize;
	kv_arr_p->arr_len++;
}//CPUEmitIntermediate
// Device-side emit: append one intermediate (key,val) pair for map task
// map_task_idx into the thread's shared scratch buffer (kv_arr_p->buff).
// Layout: raw key/val bytes grow FORWARD from buff_pos; the keyval_t
// descriptor array grows BACKWARD from the end of the buffer. The statement
// order below (descriptor placement, counter bump, overflow check, byte copy)
// is load-bearing — do not reorder.
__device__ void EmitIntermediate2(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
//printf(":%s :%d\n",key, *(int*)val);
keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
//keyval_t *p = (keyval_t *)kv_arr_p->arr;
void *buff = kv_arr_p->buff;
// new descriptor slot: (total_arr_len+1) keyval_t's back from the buffer end
keyval_t *kv_p = (keyval_t *)((char *)buff + kv_arr_p->buff_len - sizeof(keyval_t)*((*kv_arr_p->total_arr_len)+1));
// arr always points at the most recently placed (lowest-address) descriptor
kv_arr_p->arr = kv_p;
(*kv_arr_p->total_arr_len)++;
// overflow check: forward-growing bytes must not collide with the
// backward-growing descriptor array.
// NOTE(review): the counter was already incremented and arr already moved
// before this check, so a failed emit leaves the bookkeeping inconsistent —
// flagged but left as-is (TODO from Hui Li 8/6/2012).
if (!(kv_arr_p->buff_pos +keySize+valSize < kv_arr_p->buff_len - sizeof(keyval_t)*((*kv_arr_p->total_arr_len)+1))){
printf("!!!!!!!error there is not engough shared memory\n");
return;
}//
//printf("remain buff:%d\n", kv_arr_p->buff_len - sizeof(keyval_t)*(kv_arr_p->arr_len+1) - kv_arr_p->buff_pos);
// copy key bytes into the forward region and record their location
kv_p->key = (char *)(buff)+kv_arr_p->buff_pos;
kv_arr_p->buff_pos += keySize;
memcpy(kv_p->key,key,keySize);
kv_p->keySize = keySize;
// copy val bytes immediately after the key
kv_p->val = (char *)(buff)+kv_arr_p->buff_pos;
kv_arr_p->buff_pos += valSize;
memcpy(kv_p->val,val,valSize);
kv_p->valSize = valSize;
kv_arr_p->arr_len++;
// publish this task's emit count for the host-side gather
d_g_state->d_intermediate_keyval_total_count[map_task_idx] = kv_arr_p->arr_len;
//printf("EmitInterMediate2 TID[%d] map_task_id:%d, key%s: keyval_arr_len:%d\n",TID, map_task_idx, kv_p->key, kv_arr_p->arr_len);
}//__device__
//-------------------------------------------------
//called by user defined map function
//-------------------------------------------------
__global__ void GPUMapPartitioner(gpu_context d_g_state)
{
	// Map-phase kernel: each thread processes a strided subset of input
	// records. Blocks own contiguous record chunks; within a block, threads
	// interleave records with stride STRIDE.
	int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
	int block_start_row_id_idx = num_records_per_thread*blockIdx.x*blockDim.x;
	int thread_start_row_id_idx = block_start_row_id_idx
		+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
		+ (threadIdx.x%STRIDE);
	int thread_end_idx = thread_start_row_id_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record)
		thread_end_idx = d_g_state.num_input_record;
	if (thread_start_row_id_idx >= thread_end_idx)
		return;	// this thread has no records
	// Per-thread device-heap scratch. These allocations intentionally outlive
	// the kernel: pointers into them are published through
	// d_intermediate_keyval_arr_arr_p and consumed by later host-driven steps.
	char *buff = (char *)malloc(sizeof(char)*1024*100);	// 100 KB emit buffer
	int *total_arr_len = (int*)malloc(sizeof(int));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_row_id_idx+STRIDE-1)/STRIDE);
	// fix: device malloc can fail (heap limit); the original dereferenced unchecked
	if (buff == NULL || total_arr_len == NULL || kv_arr_t_arr == NULL){
		printf("GPUMapPartitioner: device malloc failed\n");
		return;
	}
	(*total_arr_len) = 0;
	int index = 0;
	for (int map_task_idx = thread_start_row_id_idx; map_task_idx < thread_end_idx; map_task_idx += STRIDE){
		// all of this thread's tasks share one buff/total_arr_len
		keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]);
		index++;
		kv_arr_t->buff = buff;
		kv_arr_t->total_arr_len = total_arr_len;
		kv_arr_t->buff_len = 1024*100;
		kv_arr_t->buff_pos = 0;
		kv_arr_t->arr = NULL;
		kv_arr_t->arr_len = 0;
		d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t;
		// resolve this record's key/val from the flattened input buffers
		char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos;
		char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos;
		int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize;
		int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize;
		// user-defined map function emits via EmitIntermediate2
		gpu_map(key, val, keySize, valSize, &d_g_state, map_task_idx);
	}//for
	// BUG FIX: removed the trailing __syncthreads(). Threads without records
	// return early above, so only part of the block reached the barrier
	// (undefined behavior); it was also unnecessary at kernel exit, since
	// kernel completion already orders all writes for subsequent launches.
}//GPUMapPartitioner
int StartCPUMap2(thread_info_t* thread_info)
{
	// Run the CPU map phase for one worker: validate the job/context, split
	// the input records evenly across num_cpus_cores pthreads, run
	// RunPandaCPUMapThread on each, and join them all. Returns 0 on success;
	// exits the process on invalid configuration.
	cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);
	if (cpu_job_conf->num_input_record <= 0)   { DoLog("Error: no any input keys"); exit(-1); }
	if (cpu_job_conf->input_keyval_arr == NULL){ DoLog("Error: input_keyval_arr == NULL"); exit(-1); }
	if (d_g_state->num_cpus_cores <= 0)        { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1); }
	// fix: removed unused locals d_keyval_arr_p and count
	DoLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores);
	int num_threads = d_g_state->num_cpus_cores;
	// ceil division so every record is assigned; end indices are clamped below
	int num_records_per_thread = (cpu_job_conf->num_input_record + num_threads - 1)/(num_threads);
	int start_row_id_idx = 0;
	int end_idx = 0;
	for (int tid = 0; tid < num_threads; tid++){
		end_idx = start_row_id_idx + num_records_per_thread;
		if (tid < (cpu_job_conf->num_input_record % num_threads))
			end_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_id_idx = start_row_id_idx;
		if (end_idx > cpu_job_conf->num_input_record)
			end_idx = cpu_job_conf->num_input_record;	// clamp the last chunks
		d_g_state->panda_cpu_task_info[tid].end_idx = end_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_id_idx = end_idx;
	}//for
	for (int tid = 0; tid < num_threads; tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	DoLog("CPU_GROUP_ID:[%d] DONE", thread_info->tid);
	return 0;
}//int
int StartCPUMap(cpu_context *d_g_state)
{
#ifdef DEV_MODE
	// DEV_MODE variant of StartCPUMap2 that reads the input directly from the
	// cpu_context: split the records across num_cpus_cores pthreads, run
	// RunPandaCPUMapThread on each, join them all. Returns 0; exits on
	// invalid configuration.
	DoLog("there are %d map tasks.",d_g_state->num_input_record);
	if (d_g_state->num_input_record <= 0)    { DoLog("Error: no any input keys"); exit(-1); }
	if (d_g_state->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1); }
	if (d_g_state->num_cpus_cores <= 0)      { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1); }
	// fix: removed unused locals d_keyval_arr_p and count, and a leftover
	// DoLog("tests") debug line
	DoLog("the number of cpus used in computation:%d",d_g_state->num_cpus_cores);
	int num_threads = d_g_state->num_cpus_cores;
	DoLog("start_row_id CPUMapPartitioner num_threads:%d num_input_record:%d",num_threads, d_g_state->num_input_record);
	// ceil division so every record is assigned; end indices are clamped below
	int num_records_per_thread = (d_g_state->num_input_record + num_threads - 1)/(num_threads);
	int start_row_id_idx = 0;
	int end_idx = 0;
	for (int tid = 0; tid < num_threads; tid++){
		end_idx = start_row_id_idx + num_records_per_thread;
		if (tid < (d_g_state->num_input_record % num_threads))
			end_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_id_idx = start_row_id_idx;
		if (end_idx > d_g_state->num_input_record)
			end_idx = d_g_state->num_input_record;	// clamp the last chunks
		d_g_state->panda_cpu_task_info[tid].end_idx = end_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_id_idx = end_idx;
	}//for
	for (int tid = 0; tid < num_threads; tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	// BUG FIX: the original logged via a bare "panda_cpu_task_info" identifier
	// that is not in scope here (compile error when DEV_MODE is defined);
	// report the per-context totals instead.
	DoLog("DONE :%d tasks current intermediate len:%d",
		d_g_state->num_input_record,
		d_g_state->intermediate_keyval_arr_arr_p[0].arr_len);
	DoLog("DONE");
#endif
	return 0;
}//int
//--------------------------------------------------
//StartGPUMap
//
//7/1/2012
//--------------------------------------------------
//--------------------------------------------------
//StartGPUMap
//
//Allocates the per-record intermediate-result pointers and counters on
//the device, zeroes the counters, and launches the GPUMapPartitioner
//kernel over num_mappers logical mappers. Returns 0; exits on invalid
//input state.
//Fixes: the first DoLog passed two arguments for a single %d (undefined
//output); h_keyval_arr_arr/d_keyval_arr_arr were allocated but the copy
//that used them is disabled and neither was ever read or stored, so both
//allocations only leaked — removed.
//--------------------------------------------------
int StartGPUMap(gpu_context *d_g_state)
{
//-------------------------------------------------------
//0, Check status of d_g_state;
//-------------------------------------------------------
DoLog("GPU_ID:[%d] check num_input_record:%d h_input_keyval_arr before StartGPUMap",d_g_state->gpu_id, d_g_state->num_input_record);
if (d_g_state->num_input_record<0) { DoLog("Error: no any input keys"); exit(-1);}
if (d_g_state->h_input_keyval_arr == NULL) { DoLog("Error: h_input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);}
if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);}
//-------------------------------------------------------
//1, prepare buffers to store intermediate results
//-------------------------------------------------------
//per-record array of pointers to intermediate keyval arrays, filled by the kernel
keyval_arr_t **d_keyval_arr_arr_p;
checkCudaErrors(hipMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*)));
d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p;
//per-record counters of emitted intermediate pairs, zeroed before the map pass
int *count = NULL;
checkCudaErrors(hipMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int)));
d_g_state->d_intermediate_keyval_total_count = count;
checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
//--------------------------------------------------
//4, launch the map partitioner kernel
//--------------------------------------------------
hipDeviceSynchronize();
//ceil-divide the configured mapper count into thread blocks
int num_blocks = (d_g_state->num_mappers+(NUM_THREADS)-1)/(NUM_THREADS);
DoLog("GPU_ID:[%d] NUM_BLOCKS:%d NUM_THREADS:%d to run GPUMapPartitioner",d_g_state->gpu_id, num_blocks, NUM_THREADS);
hipLaunchKernelGGL(( GPUMapPartitioner), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, *d_g_state);
hipDeviceSynchronize();
DoLog("GPU_ID:[%d] DONE",d_g_state->gpu_id);
return 0;
}//int
//--------------------------------------------------
//start_row_id map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys'buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start_row_id map
//8, free allocated memory
//--------------------------------------------------
/*
int start_row_idMap(Spec_t* spec, gpu_context *d_g_state)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
return 0;
}//return 0;
*/
//Placeholder teardown for a gpu_context: currently a no-op.
//NOTE(review): device allocations made during the run are not released
//here — confirm whether cleanup is intentionally deferred.
void DestroyDGlobalState(gpu_context * d_g_state){
}//void
//Run the shuffle stage on the GPU: groups the intermediate key/value
//pairs produced by the map stage (delegates to Shuffle4GPUOutput).
void StartGPUShuffle(gpu_context * state){
DoLog("GPU_ID:[%d] GPU Shuffle", state->gpu_id);
Shuffle4GPUOutput(state);
}//void
//Worker body for one CPU map thread: runs the user cpu_map() over the
//record range [start_row_id_idx, end_idx) described by the
//panda_cpu_task_info_t passed in ptr. Always returns NULL.
void *RunPandaCPUMapThread(void *ptr){
panda_cpu_task_info_t *task = (panda_cpu_task_info_t *)ptr;
cpu_context *state = (cpu_context *)(task->d_g_state);
job_configuration *conf = (job_configuration *)(task->cpu_job_conf);
for (int idx = task->start_row_id_idx; idx < task->end_idx; idx++){
keyval_t *pair = &(conf->input_keyval_arr[idx]);
cpu_map(pair->key, pair->val, pair->keySize, pair->valSize, state, idx);
}//for
DoLog("DONE :%d tasks current intermediate len:%d", task->end_idx - task->start_row_id_idx, state->intermediate_keyval_arr_arr_p[0].arr_len);
return NULL;
}
//Use Pthread to process Panda_Reduce.
//Thread entry for the reduce phase: dispatches on thread_info->device_type.
//GPU_ACC binds the calling thread to a device and runs StartGPUReduce;
//CPU_ACC walks the sorted intermediate key groups sequentially and calls
//cpu_reduce on each. Always returns NULL.
void * Panda_Reduce(void *ptr){
thread_info_t *thread_info = (thread_info_t *)ptr;
if(thread_info->device_type == GPU_ACC){
int tid = thread_info->tid;
//NOTE(review): the GPU count is hard-coded to 1 and thread_info->num_gpus
//is deliberately ignored (see commented expression) — confirm whether
//multi-GPU reduce is expected here.
int num_gpus = 1;//thread_info->num_gpus;
if (num_gpus == 0){
DoLog("thread_info->num_gpus == 0 return");
return NULL;
}//if
hipSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
int gpu_id;
hipGetDevice(&gpu_id);
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
DoLog( "start_row_id reduce tasks on GPU:%d tid:%d",gpu_id, tid);
StartGPUReduce(d_g_state);
}//if
if(thread_info->device_type == CPU_ACC){
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
DoLog("Start CPU Reduce Tasks");
if (d_g_state->num_cpus_cores == 0){
DoLog("d_g_state->num_cpus == 0 return");
return NULL;
}
//StartCPUReduce(d_g_state);
//panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr;
//cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state);
//DoLog("panda_cpu_task_info_t start_row_id_idx:%d end_idx:%d",panda_cpu_task_info->start_row_id_idx, panda_cpu_task_info->end_idx);
//run every reduce task sequentially on this thread (no per-core split here)
for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){
keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx]));
cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state);
}//for
DoLog("DONE");
}//if
//hipFree(d_filebuf);
//handle the buffer different
//free(h_filebuf);
//handle the buffer different
return NULL;
}//void
//--------------------------------------------------------
//get a value from value list of the same key
//
//param : vals
//param : interOffsetSizes
//param : index
//return: the wanted value
//--------------------------------------------------------
//Return a pointer to the keyIndex-th value inside a packed value buffer;
//the per-element stride is the .w field of interOffsetSizes[valStartIndex].
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
char *base = (char*)vals;
int stride = interOffsetSizes[valStartIndex].w;
return (void*)(base + keyIndex * stride);
}
//Return a pointer to the keyIndex-th key inside a packed key buffer;
//the per-element stride is the .y field of interOffsetSizes[valStartIndex].
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
char *base = (char*)key;
int stride = interOffsetSizes[valStartIndex].y;
return (void*)(base + keyIndex * stride);
}
//-------------------------------------------------------
//Reducer
//-------------------------------------------------------
//-------------------------------------------------------
//Reducer kernel: each thread walks its strided share of the sorted key
//groups, gathers each group's value descriptors into a device-heap
//scratch array and invokes the user gpu_reduce() on them.
//Fixes: the per-group scratch buffer allocated with device malloc() was
//never freed, exhausting the device heap on large inputs; a failed
//allocation would also have been dereferenced (NULL) — now skipped.
//NOTE(review): assumes gpu_reduce copies what it needs and does not
//retain the val_t_arr pointer — confirm against the emit implementation.
//-------------------------------------------------------
__global__ void ReducePartitioner(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_id_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_id_idx = block_start_row_id_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_id_idx+num_records_per_thread*STRIDE;
if(thread_end_idx>d_g_state.d_sorted_keyvals_arr_len)
thread_end_idx = d_g_state.d_sorted_keyvals_arr_len;
int start_row_id, end;
for(int reduce_task_idx=thread_start_row_id_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){
//d_pos_arr_4_sorted_keyval_pos_arr stores exclusive end positions, so
//this group's value range is [start_row_id, end)
if (reduce_task_idx==0)
start_row_id = 0;
else
start_row_id = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1];
end = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx];
val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end-start_row_id));
if (val_t_arr == NULL) continue; //device heap exhausted; skip this group
//all values of a group share one key: take it from the first entry
int keySize = d_g_state.d_keyval_pos_arr[start_row_id].keySize;
int keyPos = d_g_state.d_keyval_pos_arr[start_row_id].keyPos;
void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos;
for (int index = start_row_id;index<end;index++){
int valSize = d_g_state.d_keyval_pos_arr[index].valSize;
int valPos = d_g_state.d_keyval_pos_arr[index].valPos;
val_t_arr[index-start_row_id].valSize = valSize;
val_t_arr[index-start_row_id].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos;
} //for
gpu_reduce(key, val_t_arr, keySize, end-start_row_id, d_g_state);
free(val_t_arr); //release per-group scratch (leak fix)
}//for
}
//----------------------------------------------
//start_row_id reduce
//
//1, if there is not a reduce phase, just return
// then user uses spec->interKeys/spec->intervals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys'buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start_row_id reduce
//9, copy output data to Spect_t structure
//10,free allocated memory
//----------------------------------------------
//Launch the reduce stage: allocate the reduced-output array sized to the
//number of sorted key groups, then run ReducePartitioner across the grid.
void StartGPUReduce(gpu_context *d_g_state)
{
hipDeviceSynchronize();
int num_reduce_tasks = d_g_state->d_sorted_keyvals_arr_len;
d_g_state->d_reduced_keyval_arr_len = num_reduce_tasks;
checkCudaErrors(hipMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*num_reduce_tasks));
DoLog("number of reduce tasks:%d",num_reduce_tasks);
hipDeviceSynchronize();
//ceil-divide the configured reducer count into thread blocks
int num_blocks = (d_g_state->num_reducers+(NUM_THREADS)-1)/(NUM_THREADS);
DoLog("num_blocks:%d NUM_THREADS:%d",num_blocks,NUM_THREADS);
hipLaunchKernelGGL(( ReducePartitioner), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, *d_g_state);
hipDeviceSynchronize();
DoLog("DONE");
}//void
/*void *Panda_CPU_Map(void *ptr){
return NULL;
}*/
//Thread entry for the map phase: dispatches on thread_info->device_type.
//GPU_ACC initializes the device, stages input data to GPU memory, runs the
//map kernel and then the GPU shuffle; CPU_ACC does the analogous CPU-side
//init, map and shuffle. Always returns NULL.
void* Panda_Map(void *ptr){
//DoLog("panda_map");
thread_info_t *thread_info = (thread_info_t *)ptr;
if(thread_info->device_type == GPU_ACC){
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
//bind this pthread to its GPU before any CUDA/HIP work
InitGPUDevice(thread_info);
DoLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id);
InitGPUMapReduce3(d_g_state);
//printData2<<<1,1>>>(*d_g_state);
DoLog("GPU_ID:[%d] Start GPU Map Tasks",d_g_state->gpu_id);
StartGPUMap(d_g_state);
//d_g_state->d_intermediate_keyval_total_count;
//checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
StartGPUShuffle(d_g_state);
}//if
if(thread_info->device_type == CPU_ACC){
//DoLog("CPU_ACC");
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
DoLog("CPU_GROUP_ID:[%d] Init CPU Device",d_g_state->cpu_group_id);
InitCPUDevice(thread_info);
//DoLog("Init CPU MapReduce");
InitCPUMapReduce2(thread_info);
DoLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id);
StartCPUMap2(thread_info);
//d_g_state->d_intermediate_keyval_total_count;
//checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
StartCPUShuffle2(thread_info);
}
return NULL;
}//FinishMapReduce2(d_g_state);
//Log completion of the map/reduce pipeline (legacy Spec_t path).
//NOTE(review): performs no cleanup of the passed spec — confirm intended.
void FinishMapReduce(Spec_t* spec)
{
DoLog( "=====finish map/reduce=====");
}//void
//Log the amount of free GPU memory at the end of a MapReduce run.
//Fixes: removed the unused heap_limit local; avail_mem is size_t and was
//printed with "%d", which is undefined behavior on LP64 platforms — now
//printed with a matching conversion.
void FinishMapReduce2(gpu_context* state)
{
size_t total_mem, avail_mem;
checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem ));
DoLog("avail_mem:%lu",(unsigned long)avail_mem);
}//void
#endif //__PANDALIB_CU__ | 934ec8f79ee2ecdadb2bba8dda00f04069111ed9.cu | /*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaLib.cu
First Version: 2012-07-01 V0.1
Current Version: V0.3
Last Updates: 2012-8-29
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef __PANDALIB_CU__
#define __PANDALIB_CU__
#include "Panda.h"
#include "stdlib.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//return: default spec
//----------------------------------------------
//----------------------------------------------
//Allocate a job_configuration with every field zero-initialized and the
//documented defaults applied. Exits the process on allocation failure.
//----------------------------------------------
job_configuration *GetJobConf(){
//calloc yields zero-filled storage, equivalent to the malloc+memset pair
job_configuration *conf = (job_configuration *)calloc(1, sizeof(job_configuration));
if (conf == NULL) exit(-1);
conf->num_input_record = 0;
conf->input_keyval_arr = NULL;
conf->auto_tuning = false;
conf->num_mappers = 0;
conf->num_reducers = 0;
conf->num_gpus = 0;
conf->num_cpus_cores = 0;
conf->num_cpus_groups = 0;
return conf;
}//job_configuration
//Allocate a gpu_context with every field zero-initialized and the
//documented defaults applied. Exits the process on allocation failure.
gpu_context *GetGPUContext(){
//calloc yields zero-filled storage, equivalent to the malloc+memset pair
gpu_context *ctx = (gpu_context*)calloc(1, sizeof(gpu_context));
if (ctx == NULL) exit(-1);
ctx->configured = false;
ctx->h_input_keyval_arr = NULL;
ctx->num_mappers = 0;
ctx->num_reducers = 0;
return ctx;
}//gpu_context
//Allocate a cpu_context with every field zero-initialized and the
//documented defaults applied. Exits the process on allocation failure.
cpu_context *GetCPUContext(){
//calloc yields zero-filled storage, equivalent to the malloc+memset pair
cpu_context *ctx = (cpu_context*)calloc(1, sizeof(cpu_context));
if (ctx == NULL) exit(-1);
ctx->configured = false;
ctx->input_keyval_arr = NULL;
return ctx;
}//cpu_context
//Allocate a panda_context with defaults applied.
//Consistency fix: the sibling constructors (GetJobConf/GetGPUContext/
//GetCPUContext) zero the whole struct before setting defaults; this one
//did not, so any panda_context member not explicitly listed below was
//left uninitialized. A memset now matches their behavior.
panda_context *GetPandaContext(){
panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context));
if (d_g_state == NULL) exit(-1);
memset(d_g_state, 0, sizeof(panda_context));
d_g_state->input_keyval_arr = NULL;
d_g_state->intermediate_keyval_arr_arr_p = NULL;
d_g_state->sorted_intermediate_keyvals_arr = NULL;
d_g_state->sorted_keyvals_arr_len = 0;
d_g_state->num_gpus = 0;
d_g_state->gpu_context = NULL;
d_g_state->num_cpus_groups = 0;
d_g_state->cpu_context = NULL;
return d_g_state;
}//panda_context
//For version 0.3
void InitCPUMapReduce2(thread_info_t * thread_info){
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
job_configuration *job_conf = (job_configuration *)(thread_info->job_conf);
if (job_conf->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
if (job_conf->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
//DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<job_conf->num_input_record;i++){
totalKeySize += job_conf->input_keyval_arr[i].keySize;
totalValSize += job_conf->input_keyval_arr[i].valSize;
}//for
DoLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d num_cpus:%d",
d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize, totalValSize, d_g_state->num_cpus_cores);
//TODO determin num_cpus
//d_g_state->num_cpus = 12;
int num_cpus_cores = d_g_state->num_cpus_cores;
d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record);
for (int i=0;i<num_cpus_cores;i++){
d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf;
d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_id_idx = 0;
d_g_state->panda_cpu_task_info[i].end_idx = 0;
}//for
d_g_state->configured = true;
DoLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id);
}
//For Version 0.2 depressed
void InitCPUMapReduce(cpu_context* d_g_state)
{
#ifdef DEV_MODE
if (d_g_state->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
if (d_g_state->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
//DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
DoLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->input_keyval_arr[i].keySize;
totalValSize += d_g_state->input_keyval_arr[i].valSize;
}//for
DoLog("totalKeySize:%d totalValSize:%d num_cpus:%d", totalKeySize, totalValSize, d_g_state->num_cpus_cores);
int num_cpus_cores = d_g_state->num_cpus_cores;
d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*d_g_state->num_input_record);
for (int i=0;i<num_cpus_cores;i++){
d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_id_idx = 0;
d_g_state->panda_cpu_task_info[i].end_idx = 0;
}//for
d_g_state->configured = true;
DoLog("DONE");
#endif
}//void
//For Version 0.3 test depressed
void InitGPUMapReduce4(thread_info_t* thread_info)
{
#ifdef DEV_MODE
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf);
keyval_t * kv_p = gpu_job_conf->input_keyval_arr;
DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
DoLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record);
//checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<gpu_job_conf->num_input_record;i++){
totalKeySize += kv_p[i].keySize;
totalValSize += kv_p[i].valSize;
}//for
DoLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize);
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0; i<gpu_job_conf->num_input_record; i++){
keySize = kv_p[i].keySize;
valSize = kv_p[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,cudaMemcpyHostToDevice));
cudaThreadSynchronize();
d_g_state->configured = true;
//(thread_info->d_g_state) = d_g_state;
#endif
}//void
//Stage the host-side input records into device memory for the map phase:
//pack all keys and all values into two contiguous host buffers, build a
//keyval_pos_t offset index, upload the three arrays to the GPU and mark
//the context configured.
//Fix: the three host staging buffers were malloc'd and never freed, which
//leaked on every call in iterative applications; cudaMemcpy is
//synchronous, so they can be released right after the uploads.
void InitGPUMapReduce3(gpu_context* d_g_state)
{
DoLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//first pass: total bytes needed for the packed key and value buffers
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
}//for
DoLog("copy %d input records from Host to GPU memory totalKeySize:%d totalValSize:%d",d_g_state->num_input_record, totalKeySize, totalValSize);
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
//second pass: pack each record and record its offsets/sizes in the index
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
keySize = d_g_state->h_input_keyval_arr[i].keySize;
valSize = d_g_state->h_input_keyval_arr[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
//upload index, packed keys and packed values to the device
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//release host staging buffers (leak fix); device copies are complete here
free(input_keys_shared_buff);
free(input_vals_shared_buff);
free(input_keyval_pos_arr);
cudaThreadSynchronize();
d_g_state->configured = true;
}//void
#ifdef DEV_MODE
//Deprecated (DEV_MODE only) variant of InitGPUMapReduce3: same pack-and-
//upload of host input records into device memory, without the configured
//flag or the iterative-use log line.
//NOTE(review): the three host staging buffers are malloc'd and never
//freed here — same leak pattern as InitGPUMapReduce3; confirm intended.
void InitGPUMapReduce2(gpu_context* d_g_state)
{
DoLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);
//checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
//first pass: total bytes needed for the packed key and value buffers
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
}//for
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
//second pass: pack each record and record its offsets/sizes in the index
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
keySize = d_g_state->h_input_keyval_arr[i].keySize;
valSize = d_g_state->h_input_keyval_arr[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
//upload index, packed keys and packed values to the device
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,cudaMemcpyHostToDevice));
cudaThreadSynchronize();
}//void
#endif
//Initialize a CPU device context: defaults num_cpus_cores to the number
//of detected cores when the caller did not set it, then logs the thread id.
void InitCPUDevice(thread_info_t*thread_info){
cpu_context *ctx = (cpu_context *)(thread_info->d_g_state);
if (ctx->num_cpus_cores <= 0){
ctx->num_cpus_cores = getCPUCoresNum();
}
DoLog( "Init CPU Deivce tid:%d",thread_info->tid);
}
//Bind the calling pthread to a GPU and configure the device heap:
//selects device (tid % num_gpus), records the resulting gpu_id in the
//context, and raises the device malloc heap limit to 20% of total memory
//(required by kernels that call malloc, e.g. ReducePartitioner).
void InitGPUDevice(thread_info_t*thread_info){
//------------------------------------------
//1, init device
//------------------------------------------
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
int tid = thread_info->tid;
int num_gpus = d_g_state->num_gpus;
if (num_gpus == 0) {
DoLog("error num_gpus == 0");
exit(-1);
}
int gpu_id;
cudaGetDevice(&gpu_id);
int gpu_count = 0;
cudaGetDeviceCount(&gpu_count);
DoLog("check GPU Device IDs -> tid:%d current gpu_id:%d num_gpus by user:%d cudaGetDeviceCount:%d", tid, gpu_id, num_gpus, gpu_count);
//rebind only if this thread is not already on its assigned device;
//"% num_gpus" allows more worker threads than GPU devices
if ( gpu_id != tid ){
//DoLog("cudaSetDevice gpu_id %d == (tid num_gpus) %d ", gpu_id, tid%num_gpus);
cudaSetDevice(tid % num_gpus);
}//if
cudaGetDevice(&gpu_id);
d_g_state->gpu_id = gpu_id;
size_t total_mem,avail_mem, heap_limit;
checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
cudaDeviceSetLimit(cudaLimitMallocHeapSize, (int)(total_mem*0.2));
cudaDeviceGetLimit(&heap_limit, cudaLimitMallocHeapSize);
DoLog("TID:[%d] num_gpus:%d gpu_id:%d ",tid,num_gpus,gpu_id);
//NOTE(review): heap_limit/avail_mem/total_mem are size_t but printed with
//%d — values > INT_MAX would misprint; confirm DoLog's printf semantics.
DoLog("GPU_ID:[%d] cudaLimitMallocHeapSize:%d MB avail_mem:%d MB total_mem:%d MB",gpu_id, heap_limit/1024/1024, avail_mem/1024/1024,total_mem/1024/1024);
}
//Ratio = Tcpu/Tgpu
//Tcpu = (execution time on CPU cores for sampled tasks)/(#sampled tasks)
//Tgpu = (execution time on 1 GPU for sampled tasks)/(#sampled tasks)
//Ratio = Tcpu/Tgpu
//Tcpu = (execution time on CPU cores for sampled tasks)/(#sampled tasks)
//Tgpu = (execution time on 1 GPU for sampled tasks)/(#sampled tasks)
//DEV_MODE-only profiler: runs a sample of map tasks once on one GPU and
//once on one CPU group, times both, logs the per-task times and their
//ratio to the disk log, and (currently) always returns 1.0.
float Smart_Scheduler(job_configuration *job_conf){
#ifdef DEV_MODE
//NOTE(review): the gpu/cpu-group counts are hard-coded to 1 here rather
//than taken from job_conf — confirm intended for the sampling run.
int num_gpus = 1;//job_conf->num_gpus;
int num_cpus_cores = getCPUCoresNum();//job_conf->num_cpus;
//reserve two cores for the host/system during sampling
if (num_cpus_cores >2)
num_cpus_cores -= 2;
int num_cpus_group = 1;//job_conf->num_cpus_groups;
panda_context *panda = GetPandaContext();
pthread_t *no_threads = (pthread_t*)malloc(sizeof(pthread_t)*(num_gpus + num_cpus_group));
thread_info_t *thread_info = (thread_info_t*)malloc(sizeof(thread_info_t)*(num_gpus + num_cpus_group));
//build one GPU worker descriptor per device
for (int i=0; i<num_gpus; i++){
thread_info[i].tid = i;
//thread_info[i].file_name = argv[i+1];
thread_info[i].num_gpus = num_gpus;
thread_info[i].device_type = GPU_ACC;
cudaDeviceProp gpu_dev;
cudaGetDeviceProperties(&gpu_dev, i);
thread_info[i].device_name = gpu_dev.name;
gpu_context *d_g_state = GetGPUContext();
//d_g_state->matrix_size = job_conf->matrix_size;
d_g_state->num_mappers = job_conf->num_mappers;
d_g_state->num_reducers = job_conf->num_reducers;
thread_info[i].d_g_state = d_g_state;
}//for num_gpus
//build one CPU-group worker descriptor per group
for (int i=num_gpus; i<num_gpus+num_cpus_group; i++){
thread_info[i].tid = i;
thread_info[i].device_type = CPU_ACC;
cpu_context *d_g_state = GetCPUContext();
d_g_state->num_cpus_cores = num_cpus_cores;
thread_info[i].d_g_state = d_g_state;
}//for
DoLog("num_gpus:%d num_cpus_group:%d num_input_record:%d sizeof(int):%d\n", num_gpus, num_cpus_group,job_conf->num_input_record,sizeof(int));
//sample size scales with core count, clamped to the available records
int cpu_sampled_tasks_num = 0;
int gpu_sampled_tasks_num = 0;
cpu_sampled_tasks_num = num_cpus_cores*job_conf->auto_tuning_sample_rate;
gpu_sampled_tasks_num = getGPUCoresNum()*job_conf->auto_tuning_sample_rate;
if (cpu_sampled_tasks_num>job_conf->num_input_record)
cpu_sampled_tasks_num = job_conf->num_input_record;
if (gpu_sampled_tasks_num>job_conf->num_input_record)
gpu_sampled_tasks_num = job_conf->num_input_record;
//feed each device its sampled slice of the input records
for (int dev_id = 0; dev_id<(num_gpus+num_cpus_group); dev_id++){
int start_row_id_id = 0;
int end_id = 0;//job_conf->num_cpus_cores*2; //(job_conf->num_input_record/100);
if (thread_info[dev_id].device_type == GPU_ACC){
gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
end_id = start_row_id_id + gpu_sampled_tasks_num;
AddMapInputRecordGPU(d_g_state,(job_conf->input_keyval_arr), start_row_id_id, end_id);
}//if
if (thread_info[dev_id].device_type == CPU_ACC){
cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
end_id = start_row_id_id + cpu_sampled_tasks_num;
AddMapInputRecordCPU(d_g_state,(job_conf->input_keyval_arr), start_row_id_id, end_id);
}//if
}//for
//time the GPU sample run, then the CPU sample run (both synchronous)
double t1 = PandaTimer();
Panda_Map((void *)&(thread_info[0]));
double t2 = PandaTimer();
//start_row_id cpu
Panda_Map((void *)&(thread_info[1]));
double t3 = PandaTimer();
double t_cpu = (t3-t2);///cpu_sampled_tasks_num;
double t_gpu = (t2-t1);///gpu_sampled_tasks_num;
//guard against a division blow-up when the GPU run is immeasurably fast
if (t_gpu<0.0001)
t_gpu=0.0001;
double ratio = (t_cpu*gpu_sampled_tasks_num)/(t_gpu*cpu_sampled_tasks_num);
char log[128];
sprintf(log," cpu_sampled_tasks:%d cpu time:%f cpu time per task:%f", cpu_sampled_tasks_num, t_cpu, t_cpu/(cpu_sampled_tasks_num));
DoDiskLog(log);
sprintf(log," gpu_sampled_tasks:%d gpu time:%f gpu time per task:%f ratio:%f", gpu_sampled_tasks_num, t_gpu, t_gpu/(gpu_sampled_tasks_num), ratio);
DoDiskLog(log);
#endif
return (1.0);
}//void
//--------------------------------------------------
// PandaMetaScheduler
//--------------------------------------------------
/*
 * 1) input: a set of panda workers (threads)
 * 2) each panda worker consists of one panda job and one panda device
 * 3) copy input data from the panda job to the panda device
 */
//--------------------------------------------------
//PandaMetaScheduler
//Coordinates one MapReduce job across a mixed set of GPU and CPU
//workers: attaches a device context to each worker, loads each
//worker's full input, runs the map phase on worker pthreads, merges
//the shuffle output into the global panda context, splits the sorted
//intermediate records with a static 10:1 GPU:CPU ratio, and runs the
//reduce phase.
//--------------------------------------------------
void PandaMetaScheduler(thread_info_t *thread_info, int num_gpus, int num_cpus_groups){
	panda_context *panda = GetPandaContext();
	panda->num_gpus = num_gpus;
	panda->num_cpus_groups = num_cpus_groups;
	pthread_t *no_threads = (pthread_t*)malloc(sizeof(pthread_t)*(num_gpus + num_cpus_groups));
	//attach a fresh device context to every worker descriptor
	for (int dev_id=0; dev_id<(num_gpus + num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			job_configuration* gpu_job_conf = (job_configuration*)(thread_info[dev_id].job_conf);
			gpu_context *d_g_state = GetGPUContext();
			d_g_state->num_mappers = gpu_job_conf->num_mappers;
			d_g_state->num_reducers = gpu_job_conf->num_reducers;
			d_g_state->num_gpus = num_gpus;
			thread_info[dev_id].tid = dev_id;
			thread_info[dev_id].d_g_state = d_g_state;
			DoLog("DEV_ID:[%d] GPU_ACC TID:%d",dev_id,thread_info[dev_id].tid);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context *d_g_state = GetCPUContext();
			thread_info[dev_id].tid = dev_id;
			thread_info[dev_id].d_g_state = d_g_state;
			DoLog("DEV_ID:[%d] CPU_ACC TID:%d",dev_id,thread_info[dev_id].tid);
		}//if
	}//for
	//each worker maps the full record range of its own job configuration
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			job_configuration *gpu_job_conf = (job_configuration *)(thread_info[dev_id].job_conf);
			int start_row_id_id = 0;
			int end_id = gpu_job_conf->num_input_record;
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordGPU(d_g_state,(gpu_job_conf->input_keyval_arr), start_row_id_id,end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			job_configuration *cpu_job_conf = (job_configuration *)(thread_info[dev_id].job_conf);
			int start_row_id_id = 0;
			int end_id = cpu_job_conf->num_input_record;
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordCPU(d_g_state,(cpu_job_conf->input_keyval_arr),start_row_id_id, end_id);
		}//if
	}//for
	//map phase: one pthread per device, then join all of them
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_groups); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Map, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i = 0; i < num_gpus + num_cpus_groups; i++){
		void *exitstat;
		if (pthread_join(no_threads[i],&exitstat)!=0) perror("joining failed");
	}//for
	//shuffle: merge every device's intermediate output into the panda context
	for (int i = 0; i < num_gpus+num_cpus_groups; i++){
		if (thread_info[i].device_type == CPU_ACC)
			Panda_Shuffle_Merge_CPU((panda_context*)panda, (cpu_context*)(thread_info[i].d_g_state));
		if (thread_info[i].device_type == GPU_ACC)
			Panda_Shuffle_Merge_GPU((panda_context*)panda, (gpu_context*)(thread_info[i].d_g_state));
	}//for
	//reduce split: each GPU receives 10x a CPU group's share
	//(TODO: reuse an auto-tuned ratio here as well)
	int num_sorted_intermediate_record = panda->sorted_keyvals_arr_len;
	int records_per_device = num_sorted_intermediate_record/(num_gpus*10+num_cpus_groups);
	int *split = (int*)malloc(sizeof(int)*(num_gpus+num_cpus_groups));
	for (int i=0;i<num_gpus;i++){
		split[i] = records_per_device*10*(i+1);
	}//for
	for (int i=num_gpus;i<num_gpus+num_cpus_groups;i++){
		split[i] = records_per_device*10*(num_gpus)+(i+1)*records_per_device;
	}//for
	//the last device absorbs any rounding remainder
	split[num_gpus + num_cpus_groups-1] = num_sorted_intermediate_record;
	//split[dev_id-1] .. split[dev_id] is device dev_id's reduce input range
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_groups); dev_id++){
		int start_row_id_id = 0;
		if (dev_id>0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordGPU(d_g_state,(panda->sorted_intermediate_keyvals_arr),start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordCPU(d_g_state,(panda->sorted_intermediate_keyvals_arr),start_row_id_id, end_id);
		}//if
	}//for
	//reduce phase: one pthread per device, then join all of them
	for (int dev_id = 0; dev_id < (num_gpus+num_cpus_groups); dev_id++){
		if (pthread_create(&(no_threads[dev_id]),NULL,Panda_Reduce,(char *)&(thread_info[dev_id]))!=0)
			perror("Thread creation failed!\n");
	}//for
	for (int i=0; i < num_gpus + num_cpus_groups; i++){
		void *exitstat;
		if (pthread_join(no_threads[i],&exitstat)!=0) perror("joining failed");
	}//for
	//count the reduce output records across all devices (count only;
	//the data itself stays in each device context)
	int total_output_records = 0;
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_groups); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->d_reduced_keyval_arr_len;
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->sorted_keyvals_arr_len;
		}//if
	}//for
	DoLog("number of reduce output:%d\n",total_output_records);
	DoLog("=====panda mapreduce job finished=====");
	//bug fix: split and no_threads were previously leaked
	free(split);
	free(no_threads);
}//PandaMetaScheduler
//--------------------------------------------------
//Start_Panda_Job
//Top-level driver for a Panda MapReduce job (DEV_MODE builds only):
// 1) builds one worker descriptor per GPU and per CPU group,
// 2) splits the input records between devices using a static ratio
//    (optionally measured at runtime by Smart_Scheduler),
// 3) runs the map phase on worker pthreads and merges the shuffle output,
// 4) splits the sorted intermediate records and runs the reduce phase.
//--------------------------------------------------
void Start_Panda_Job(job_configuration *job_conf){
#ifdef DEV_MODE
	int num_gpus = job_conf->num_gpus;
	int num_cpus_cores = job_conf->num_cpus_cores;
	int num_cpus_group = job_conf->num_cpus_groups;
	panda_context *panda = GetPandaContext();
	panda->num_gpus = num_gpus;
	panda->num_cpus_groups = num_cpus_group;
	DoLog("Start num_gpus:%d num_cpus_groups:%d", num_gpus, num_cpus_group);
	pthread_t *no_threads = (pthread_t*)malloc(sizeof(pthread_t)*(num_gpus + num_cpus_group));
	thread_info_t *thread_info = (thread_info_t*)malloc(sizeof(thread_info_t)*(num_gpus + num_cpus_group));
	//one descriptor per GPU device
	for (int i=0; i<num_gpus; i++){
		thread_info[i].tid = i;
		thread_info[i].num_gpus = num_gpus;
		thread_info[i].device_type = GPU_ACC;
		cudaDeviceProp gpu_dev;
		cudaGetDeviceProperties(&gpu_dev, i);
		thread_info[i].device_name = gpu_dev.name;
		gpu_context *d_g_state = GetGPUContext();
		d_g_state->matrix_size = job_conf->matrix_size;
		d_g_state->num_mappers = job_conf->num_mappers;
		d_g_state->num_reducers = job_conf->num_reducers;
		thread_info[i].d_g_state = d_g_state;
	}//for num_gpus
	//one descriptor per CPU worker group
	for (int i=num_gpus; i<num_gpus+num_cpus_group; i++){
		thread_info[i].tid = i;
		thread_info[i].device_type = CPU_ACC;
		cpu_context *d_g_state = GetCPUContext();
		d_g_state->num_cpus_cores = num_cpus_cores;
		thread_info[i].d_g_state = d_g_state;
	}//for
	//GPU:CPU workload ratio -- either configured up front or measured
	//at runtime by sampling a few map tasks on each device class
	double ratio = 10.0;
	ratio = (double)(job_conf->ratio);
	if (job_conf->auto_tuning){
		ratio = (Smart_Scheduler(job_conf));
		job_conf->ratio = ratio;
	}//if
	//cast sizeof to int to match the %d format specifier
	DoLog("num_gpus:%d num_cpus_group:%d num_input_record:%d sizeof(int):%d ratio:%f\n", num_gpus, num_cpus_group,job_conf->num_input_record,(int)sizeof(int),ratio);
	//split[dev_id] is the exclusive end index of device dev_id's share;
	//each GPU receives 'ratio' times the records of a CPU group
	int *split = (int *)malloc(sizeof(int)*(num_gpus+num_cpus_group));
	int num_input_record = job_conf->num_input_record;
	int records_per_device = (int)(num_input_record/(num_gpus*ratio+num_cpus_group));
	for (int i=0;i<num_gpus;i++){
		split[i] = (int)(records_per_device*ratio*(i+1));
	}//for
	for (int i=num_gpus;i<num_gpus+num_cpus_group;i++){
		split[i] = (int)(records_per_device*ratio*(num_gpus)+(i+1)*records_per_device);
	}//for
	//the last device absorbs any rounding remainder
	split[num_gpus+num_cpus_group-1] = num_input_record;
	printf("--- split:num_input_record:%d records_per_device:%d ",num_input_record, records_per_device);
	for (int i=0;i<num_gpus+num_cpus_group;i++)
		printf("%d\t",split[i]);
	printf("\n");
	//hand each device its slice [split[dev_id-1], split[dev_id]) of the input
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_group); dev_id++){
		int start_row_id_id = 0;
		if (dev_id>0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordGPU(d_g_state,(job_conf->input_keyval_arr), start_row_id_id,end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddMapInputRecordCPU(d_g_state,(job_conf->input_keyval_arr),start_row_id_id, end_id);
		}//if
	}//for
	//map phase: one pthread per device, then join all of them
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_group); dev_id++){
		if (pthread_create(&(no_threads[dev_id]), NULL, Panda_Map, (char *)&(thread_info[dev_id])) != 0)
			perror("Thread creation failed!\n");
	}//for
	for (int i=0; i < num_gpus + num_cpus_group; i++){
		void *exitstat;
		if (pthread_join(no_threads[i],&exitstat)!=0) perror("joining failed");
	}//for
	//shuffle: merge every device's intermediate output into the panda context
	DoLog("start_row_id to merge!");
	for (int i = 0; i<num_gpus; i++){
		Panda_Shuffle_Merge_GPU((panda_context*)panda, (gpu_context*)(thread_info[i].d_g_state));
	}//for
	for (int i = num_gpus; i < num_gpus+num_cpus_group; i++){
		Panda_Shuffle_Merge_CPU((panda_context*)panda, (cpu_context*)(thread_info[i].d_g_state));
	}//for
	DoLog("totoal number of different intermediate records:%d",panda->sorted_keyvals_arr_len);
	//reduce split: static 10:1 GPU:CPU ratio (TODO: auto-tune the reduce
	//ratio the same way the map ratio is tuned)
	int num_sorted_intermediate_record = panda->sorted_keyvals_arr_len;
	records_per_device = num_sorted_intermediate_record/(num_gpus*10+num_cpus_group);
	for (int i=0;i<num_gpus;i++){
		split[i] = records_per_device*10*(i+1);
	}//for
	for (int i=num_gpus;i<num_gpus+num_cpus_group;i++){
		split[i] = records_per_device*10*(num_gpus)+(i+1)*records_per_device;
	}//for
	split[num_gpus + num_cpus_group-1] = num_sorted_intermediate_record;
	//hand each device its slice of the sorted intermediate records
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_group); dev_id++){
		int start_row_id_id = 0;
		if (dev_id>0) start_row_id_id = split[dev_id-1];
		int end_id = split[dev_id];
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordGPU(d_g_state,(panda->sorted_intermediate_keyvals_arr),start_row_id_id, end_id);
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			AddReduceInputRecordCPU(d_g_state,(panda->sorted_intermediate_keyvals_arr),start_row_id_id, end_id);
		}//if
	}//for
	//reduce phase: one pthread per device, then join all of them
	for (int dev_id = 0; dev_id < (num_gpus+num_cpus_group); dev_id++){
		if (pthread_create(&(no_threads[dev_id]),NULL,Panda_Reduce,(char *)&(thread_info[dev_id]))!=0)
			perror("Thread creation failed!\n");
	}//for
	for (int i=0; i < num_gpus + num_cpus_group; i++){
		void *exitstat;
		if (pthread_join(no_threads[i],&exitstat)!=0) perror("joining failed");
	}//for
	DoLog("Finishing Panda MapReduce Job...");
	//count the reduce output records across all devices
	int total_output_records = 0;
	for (int dev_id = 0; dev_id<(num_gpus+num_cpus_group); dev_id++){
		if (thread_info[dev_id].device_type == GPU_ACC){
			gpu_context* d_g_state = (gpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->d_reduced_keyval_arr_len;
		}//if
		if (thread_info[dev_id].device_type == CPU_ACC){
			cpu_context* d_g_state = (cpu_context*)(thread_info[dev_id].d_g_state);
			total_output_records += d_g_state->sorted_keyvals_arr_len;
		}//if
	}//for
	DoLog("there are :%d output records\n",total_output_records);
	DoLog("=====finish map/reduce=====");
	//bug fix: these job-lifetime allocations were previously leaked
	free(split);
	free(no_threads);
	free(thread_info);
#endif
}//Start_Panda_Job
//--------------------------------------------------
//AddPandaTask
//Appends one key/value input record to the job configuration,
//deep-copying both buffers so the caller may reuse its memory.
//Silently ignores the record if num_input_record is negative or
//if the array cannot be grown.
//--------------------------------------------------
void AddPandaTask(job_configuration* job_conf,
						void* key,
						void* val,
						int keySize,
						int valSize){
	int len = job_conf->num_input_record;
	if (len<0) return;
	if (len == 0) job_conf->input_keyval_arr = NULL;
	//bug fix: grow via a temporary so the existing array is not leaked
	//(and not dereferenced through NULL) if realloc fails
	keyval_t *grown = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1));
	if (grown == NULL) return;
	job_conf->input_keyval_arr = grown;
	job_conf->input_keyval_arr[len].keySize = keySize;
	job_conf->input_keyval_arr[len].valSize = valSize;
	//deep copy: the configuration owns its own key/value buffers
	job_conf->input_keyval_arr[len].key = malloc(keySize);
	job_conf->input_keyval_arr[len].val = malloc(valSize);
	memcpy(job_conf->input_keyval_arr[len].key,key,keySize);
	memcpy(job_conf->input_keyval_arr[len].val,val,valSize);
	job_conf->num_input_record++;
}
//--------------------------------------------------
//AddReduceInputRecordGPU
//Packs the sorted intermediate key/value groups in
//[start_row_idi, endi) into flat host buffers, then uploads the key
//buffer, value buffer and position index arrays to the GPU so the
//reduce kernels can consume them.
//--------------------------------------------------
void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_idi, int endi){
	//total number of individual values across all key groups
	long total_count = 0;
	for(int i=start_row_idi;i<endi;i++){
		total_count += sorted_intermediate_keyvals_arr[i].val_arr_len;
	}//for
	//byte totals for the flattened key and value buffers
	int totalKeySize = 0;
	int totalValSize = 0;
	for(int i=start_row_idi;i<endi;i++){
		totalKeySize += sorted_intermediate_keyvals_arr[i].keySize;
		for (int j=0;j<sorted_intermediate_keyvals_arr[i].val_arr_len;j++)
			totalValSize += sorted_intermediate_keyvals_arr[i].vals[j].valSize;
	}//for
	DoLog("totalKeySize:%d totalValSize:%d ",totalKeySize,totalValSize);

	checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
	checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
	checkCudaErrors(cudaMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));

	//host staging buffers; the h_* pointers remain owned by the context
	d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
	d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);
	char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
	char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;

	int sorted_key_arr_len = (endi-start_row_idi);
	keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
	//cast total_count to int to match the %d format specifier
	DoLog("total number of different intermediate records:%d total records:%d", endi-start_row_idi, (int)total_count);
	//pos_arr_4_pos_arr[k] = exclusive end index (into the keyval_pos
	//array) of the values belonging to the k-th key group
	int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len));
	memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len);

	//flatten: keys packed contiguously, values packed contiguously,
	//with one keyval_pos_t entry per (key, value) pair
	int index = 0;
	int keyPos = 0;
	int valPos = 0;
	for (int i=start_row_idi;i<endi;i++){
		keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]);
		memcpy(sorted_keys_shared_buff+keyPos,p->key, p->keySize);
		for (int j=0;j<p->val_arr_len;j++){
			tmp_keyval_pos_arr[index].keyPos = keyPos;
			tmp_keyval_pos_arr[index].keySize = p->keySize;
			tmp_keyval_pos_arr[index].valPos = valPos;
			tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize;
			memcpy(sorted_vals_shared_buff + valPos,p->vals[j].val,p->vals[j].valSize);
			valPos += p->vals[j].valSize;
			index++;
		}//for
		keyPos += p->keySize;
		pos_arr_4_pos_arr[i-start_row_idi] = index;
	}//for

	d_g_state->d_sorted_keyvals_arr_len = endi-start_row_idi;
	checkCudaErrors(cudaMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len));
	checkCudaErrors(cudaMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,cudaMemcpyHostToDevice));

	//bug fix: these staging arrays were previously leaked, and an
	//additional unused keyval_pos_arr allocation has been removed
	free(tmp_keyval_pos_arr);
	free(pos_arr_4_pos_arr);
}
//--------------------------------------------------
//AddMapInputRecordGPU
//Appends the records kv_p[start_row_id_id, end_id) to the GPU
//context's host-side map input array. Only the key/val pointers are
//copied (shallow copy); the caller keeps ownership of the buffers.
//--------------------------------------------------
void AddMapInputRecordGPU(gpu_context* d_g_state,
						keyval_t *kv_p, int start_row_id_id, int end_id){
	int len = d_g_state->num_input_record;
	if (len == 0) d_g_state->h_input_keyval_arr = NULL;
	if (len < 0) return;
	if (end_id <= start_row_id_id) return;
	DoLog("GPU_ID:[%d] add map input record for gpu context current #input:%d added #input:%d",d_g_state->gpu_id, len, end_id - start_row_id_id);
	int added = end_id - start_row_id_id;
	//grow the array once for the whole batch
	d_g_state->h_input_keyval_arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + added));
	keyval_t *dst = &(d_g_state->h_input_keyval_arr[len]);
	for (int src = start_row_id_id; src < end_id; src++, dst++){
		dst->keySize = kv_p[src].keySize;
		dst->valSize = kv_p[src].valSize;
		dst->key = kv_p[src].key;		//shallow copy of the key buffer
		dst->val = kv_p[src].val;		//shallow copy of the value buffer
		d_g_state->num_input_record++;
	}
}
//--------------------------------------------------
//AddMapInputRecordCPU
//Appends the records kv_p[start_row_idi, endi) to the CPU context's
//map input array. Only the key/val pointers are copied (shallow
//copy); the caller keeps ownership of the underlying buffers.
//--------------------------------------------------
void AddMapInputRecordCPU(cpu_context* d_g_state,
						keyval_t *kv_p, int start_row_idi, int endi){
	int len = d_g_state->num_input_record;
	if (len < 0) return;
	if (len == 0) d_g_state->input_keyval_arr = NULL;
	DoLog("CPU_GROUP_ID:[%d] add map input record for cpu context current #input:%d added #input:%d",d_g_state->cpu_group_id,len,endi-start_row_idi);
	//grow the array once for the whole batch
	d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len + endi - start_row_idi));
	int dst = len;
	for (int src = start_row_idi; src < endi; src++){
		keyval_t *rec = &(d_g_state->input_keyval_arr[dst]);
		rec->keySize = kv_p[src].keySize;
		rec->valSize = kv_p[src].valSize;
		rec->key = kv_p[src].key;		//shallow copy of the key buffer
		rec->val = kv_p[src].val;		//shallow copy of the value buffer
		d_g_state->num_input_record++;
		dst++;
	}
}
//--------------------------------------------------
//AddReduceInputRecordCPU
//Appends the sorted intermediate key groups kv_p[start_row_id_id,
//end_id) to the CPU context's reduce input array. Shallow copy: the
//key and vals pointers are shared with the caller's array.
//--------------------------------------------------
void AddReduceInputRecordCPU(cpu_context* d_g_state,
						keyvals_t *kv_p, int start_row_id_id, int end_id){
	//bug fix: message now matches the actual '<' condition being tested
	if (end_id<start_row_id_id){ DoLog("error ! end_id < start_row_id_id"); end_id = start_row_id_id; }
	int len = d_g_state->sorted_keyvals_arr_len;
	if (len<0) { DoLog("error ! len<0"); return; }
	if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL;
	d_g_state->sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(d_g_state->sorted_intermediate_keyvals_arr,
		sizeof(keyvals_t)*(len+end_id-start_row_id_id));
	for (int i = len; i< len+end_id-start_row_id_id; i++){
		//source record corresponding to destination slot i
		//(an unused 'int test' read of keySize has been removed)
		keyvals_t *src = &(kv_p[start_row_id_id+i-len]);
		d_g_state->sorted_intermediate_keyvals_arr[i].keySize = src->keySize;
		d_g_state->sorted_intermediate_keyvals_arr[i].key = src->key;
		d_g_state->sorted_intermediate_keyvals_arr[i].vals = src->vals;
		d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len = src->val_arr_len;
		d_g_state->sorted_keyvals_arr_len++;
	}//for
}//AddReduceInputRecordCPU
//--------------------------------------------------
//Emit2
//Device-side reduce output emitter: stores one key/value pair into
//the per-thread output slot d_reduced_keyval_arr[TID], deep-copying
//both buffers onto the device heap.
//NOTE(review): the device malloc results are not checked -- if the
//device heap is exhausted the memcpy below will fault. The debug
//printf assumes key is a NUL-terminated string and val holds an int
//-- confirm against the user-defined reduce callers.
//--------------------------------------------------
__device__ void Emit2 (void* key,
void* val,
int keySize,
int valSize,
gpu_context *d_g_state){
//one output slot per thread, indexed by the global thread id
keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
p->keySize = keySize;
//deep copy onto the device heap (result unchecked; see NOTE above)
p->key = malloc(keySize);
memcpy(p->key,key,keySize);
p->valSize = valSize;
p->val = malloc(valSize);
memcpy(p->val,val,valSize);
//debug trace of the emitted pair
printf("[output]: key:%s val:%d\n",key,*(int *)val);
}//__device__
//--------------------------------------------------
//CPUEmitIntermediate
//Host-side intermediate emitter for CPU map tasks: appends one
//deep-copied key/value pair to the per-task intermediate array
//intermediate_keyval_arr_arr_p[map_task_idx]. Each map task has its
//own array, so concurrent map threads do not contend here.
//--------------------------------------------------
void CPUEmitIntermediate(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
	keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
	//first emission for this task starts from an empty array
	if (kv_arr_p->arr_len==0)
		kv_arr_p->arr = NULL;
	kv_arr_p->arr = (keyval_t*)realloc(kv_arr_p->arr, sizeof(keyval_t)*(kv_arr_p->arr_len+1));
	int current_map_output_index = (kv_arr_p->arr_len);
	keyval_t *kv_p = &(kv_arr_p->arr[current_map_output_index]);
	//bug fix: this previously allocated malloc(sizeof(keySize)) -- i.e.
	//sizeof(int) == 4 bytes -- and then copied keySize bytes into it,
	//overflowing the heap for any key larger than 4 bytes
	kv_p->key = (char *)malloc(keySize);
	memcpy(kv_p->key,key,keySize);
	kv_p->keySize = keySize;
	//bug fix: same 4-byte under-allocation for the value buffer
	kv_p->val = (char *)malloc(valSize);
	memcpy(kv_p->val,val,valSize);
	kv_p->valSize = valSize;
	kv_arr_p->arr_len++;
}//CPUEmitIntermediate
//--------------------------------------------------
//EmitIntermediate2
//Device-side intermediate emitter for GPU map tasks. The per-thread
//scratch buffer (set up by GPUMapPartitioner) is used from both ends:
//key/value payload bytes grow upward from buff_pos, while the
//keyval_t metadata entries grow downward from the end of the buffer
//(indexed by the shared *total_arr_len counter). The two regions must
//not meet -- see the overflow check below.
//--------------------------------------------------
__device__ void EmitIntermediate2(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
//printf(":%s :%d\n",key, *(int*)val);
keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
//keyval_t *p = (keyval_t *)kv_arr_p->arr;
void *buff = kv_arr_p->buff;
//&(kv_arr_p->arr[len]) = (keyval_t*)((char *)buff - sizeof(keyval_t));
//new metadata slot: (total_arr_len+1) keyval_t entries back from the buffer end
keyval_t *kv_p = (keyval_t *)((char *)buff + kv_arr_p->buff_len - sizeof(keyval_t)*((*kv_arr_p->total_arr_len)+1));
//arr always points at the lowest (most recent) metadata entry
kv_arr_p->arr = kv_p;
(*kv_arr_p->total_arr_len)++;
//TODO Hui Li 8/6/2012
//overflow check: payload (growing up) must stay below metadata (growing down)
//NOTE(review): the counter and arr pointer were already advanced above, so on
//overflow this returns with the new slot counted but unwritten -- confirm the
//consumers tolerate this, or move the check before the increment.
if (!(kv_arr_p->buff_pos +keySize+valSize < kv_arr_p->buff_len - sizeof(keyval_t)*((*kv_arr_p->total_arr_len)+1))){
printf("!!!!!!!error there is not engough shared memory\n");
return;
}//
//printf("remain buff:%d\n", kv_arr_p->buff_len - sizeof(keyval_t)*(kv_arr_p->arr_len+1) - kv_arr_p->buff_pos);
//int current_map_output_index = (kv_arr_p->arr_len);
//keyval_t *kv_p = &(kv_arr_p->arr[current_map_output_index]);
//copy the key payload at the current write position
kv_p->key = (char *)(buff)+kv_arr_p->buff_pos;
kv_arr_p->buff_pos += keySize;
memcpy(kv_p->key,key,keySize);
kv_p->keySize = keySize;
//copy the value payload immediately after the key
kv_p->val = (char *)(buff)+kv_arr_p->buff_pos;
kv_arr_p->buff_pos += valSize;
memcpy(kv_p->val,val,valSize);
kv_p->valSize = valSize;
kv_arr_p->arr_len++;
//publish this task's emission count for the host-side shuffle
d_g_state->d_intermediate_keyval_total_count[map_task_idx] = kv_arr_p->arr_len;
//printf("EmitInterMediate2 TID[%d] map_task_id:%d, key%s: keyval_arr_len:%d\n",TID, map_task_idx, kv_p->key, kv_arr_p->arr_len);
}//__device__
//-------------------------------------------------
//called by user defined map function
//-------------------------------------------------
//--------------------------------------------------
//GPUMapPartitioner
//Map-phase kernel: distributes the input records over all threads in
//stripes of STRIDE, allocates one device-heap scratch buffer per
//thread for intermediate key/value output, and runs the user-defined
//gpu_map on each assigned record (which emits via EmitIntermediate2).
//The scratch allocations are consumed after the kernel through
//d_intermediate_keyval_arr_arr_p, so they are deliberately NOT freed.
//--------------------------------------------------
__global__ void GPUMapPartitioner(gpu_context d_g_state)
{
	//ceil-divide the records over the whole grid
	int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
	int block_start_row_id_idx = num_records_per_thread*blockIdx.x*blockDim.x;
	//strided assignment: thread t handles records t, t+STRIDE, ... within its band
	int thread_start_row_id_idx = block_start_row_id_idx
		+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
		+ (threadIdx.x%STRIDE);
	int thread_end_idx = thread_start_row_id_idx+num_records_per_thread*STRIDE;
	if (thread_end_idx>d_g_state.num_input_record)
		thread_end_idx = d_g_state.num_input_record;
	if (thread_start_row_id_idx >= thread_end_idx)
		return;
	//per-thread scratch: 100KB payload buffer, a shared emission counter,
	//and one keyval_arr_t descriptor per assigned record
	char * buff = (char *)malloc(sizeof(char)*1024*100);
	int * total_arr_len = (int*)malloc(sizeof(int));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_row_id_idx+STRIDE-1)/STRIDE);
	//bug fix: device-heap malloc can return NULL; bail out cleanly
	//instead of faulting on the first dereference
	if (buff == NULL || total_arr_len == NULL || kv_arr_t_arr == NULL){
		if (buff) free(buff);
		if (total_arr_len) free(total_arr_len);
		if (kv_arr_t_arr) free(kv_arr_t_arr);
		return;
	}
	(*total_arr_len) = 0;
	int index = 0;
	for(int map_task_idx=thread_start_row_id_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
		//all of this thread's records share the same payload buffer and counter
		keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]);
		index++;
		kv_arr_t->buff = buff;
		kv_arr_t->total_arr_len = total_arr_len;
		kv_arr_t->buff_len = 1024*100;
		kv_arr_t->buff_pos = 0;
		kv_arr_t->arr = NULL;
		kv_arr_t->arr_len = 0;
		d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t;
		//locate this record's key/value in the flattened input buffers
		char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos;
		char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos;
		int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize;
		int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize;
		//user-defined map function; emits via EmitIntermediate2
		gpu_map(key, val, keySize, valSize, &d_g_state, map_task_idx);
	}//for
	//bug fix: the kernel previously ended with __syncthreads() after the
	//early returns above -- a barrier in divergent control flow is
	//undefined behavior, and a barrier as the last statement of a kernel
	//synchronizes nothing, so it was removed.
}//GPUMapPartitioner
//--------------------------------------------------
//StartCPUMap2
//Partitions this CPU group's map input records evenly across
//d_g_state->num_cpus_cores worker pthreads, runs RunPandaCPUMapThread
//on each slice, and waits for all of them to finish.
//Returns 0 on success; exits the process on invalid configuration.
//--------------------------------------------------
int StartCPUMap2(thread_info_t* thread_info)
{
	cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);
	//sanity-check the configuration before spawning workers
	if (cpu_job_conf->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
	if (cpu_job_conf->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
	//(unused locals d_keyval_arr_p and count removed)
	DoLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores);
	int num_threads = d_g_state->num_cpus_cores;
	//ceil-divide the records over the worker threads
	int num_records_per_thread = (cpu_job_conf->num_input_record+num_threads-1)/(num_threads);
	int start_row_id_idx = 0;
	int end_idx = 0;
	for (int tid = 0;tid<num_threads;tid++){
		end_idx = start_row_id_idx + num_records_per_thread;
		//NOTE(review): this extra +1 for remainder threads combines with
		//the ceil-divide above, so trailing threads may receive empty
		//(clamped) slices -- benign, but worth confirming intent
		if (tid < (cpu_job_conf->num_input_record % num_threads) )
			end_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_id_idx = start_row_id_idx;
		if (end_idx > cpu_job_conf->num_input_record)
			end_idx = cpu_job_conf->num_input_record;
		d_g_state->panda_cpu_task_info[tid].end_idx = end_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_id_idx = end_idx;
	}//for
	//wait for every worker thread of this group to complete
	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	DoLog("CPU_GROUP_ID:[%d] DONE", thread_info->tid);
	return 0;
}//StartCPUMap2
//--------------------------------------------------
//StartCPUMap
//Legacy entry point (DEV_MODE builds only): splits the CPU context's
//map input records across num_cpus_cores worker pthreads and waits
//for all of them to finish. Returns 0; exits the process on invalid
//configuration.
//--------------------------------------------------
int StartCPUMap(cpu_context *d_g_state)
{
#ifdef DEV_MODE
	DoLog("there are %d map tasks.",d_g_state->num_input_record);
	//sanity-check the configuration before spawning workers
	if (d_g_state->num_input_record<=0) { DoLog("Error: no any input keys"); exit(-1);}
	if (d_g_state->input_keyval_arr == NULL) { DoLog("Error: input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_cpus_cores <= 0) { DoLog("Error: d_g_state->num_cpus == 0"); exit(-1);}
	//(unused locals d_keyval_arr_p and count, and a stray DoLog("tests")
	//debug line, have been removed)
	DoLog("the number of cpus used in computation:%d",d_g_state->num_cpus_cores);
	int num_threads = d_g_state->num_cpus_cores;
	DoLog("start_row_id CPUMapPartitioner num_threads:%d num_input_record:%d",num_threads, d_g_state->num_input_record);
	//ceil-divide the records over the worker threads
	int num_records_per_thread = (d_g_state->num_input_record+num_threads-1)/(num_threads);
	int start_row_id_idx = 0;
	int end_idx = 0;
	for (int tid = 0;tid<num_threads;tid++){
		end_idx = start_row_id_idx + num_records_per_thread;
		if (tid < (d_g_state->num_input_record % num_threads) )
			end_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_id_idx = start_row_id_idx;
		if (end_idx > d_g_state->num_input_record)
			end_idx = d_g_state->num_input_record;
		d_g_state->panda_cpu_task_info[tid].end_idx = end_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_id_idx = end_idx;
	}//for
	//wait for every worker thread to complete
	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	//bug fix: the old summary log dereferenced 'panda_cpu_task_info',
	//an identifier that was never declared in this scope (only a
	//commented-out declaration existed); report the last thread's
	//slice through the context instead
	DoLog("DONE :%d tasks current intermediate len:%d",
		d_g_state->panda_cpu_task_info[num_threads-1].end_idx - d_g_state->panda_cpu_task_info[num_threads-1].start_row_id_idx,
		d_g_state->intermediate_keyval_arr_arr_p[0].arr_len);
	DoLog("DONE");
#endif
	return 0;
}//StartCPUMap
//--------------------------------------------------
//StartGPUMap
//
//7/1/2012
//--------------------------------------------------
//--------------------------------------------------
// StartGPUMap: run the map phase on one GPU.
// Validates d_g_state, allocates the per-input-record intermediate
// key/value pointer array and counters on the device, then launches
// GPUMapPartitioner over d_g_state->num_mappers logical threads.
// Returns 0 on success; exits the process on invalid input state.
//--------------------------------------------------
int StartGPUMap(gpu_context *d_g_state)
{
	//-------------------------------------------------------
	//0, check the status of d_g_state
	//-------------------------------------------------------
	// BUGFIX: the original format string had one %d for two arguments.
	DoLog("GPU_ID:[%d] check num_input_record:%d h_input_keyval_arr before StartGPUMap",d_g_state->gpu_id, d_g_state->num_input_record);
	if (d_g_state->num_input_record<0) { DoLog("Error: no any input keys"); exit(-1);}
	if (d_g_state->h_input_keyval_arr == NULL) { DoLog("Error: h_input_keyval_arr == NULL"); exit(-1);}
	// default the mapper/reducer counts when the caller did not set them
	if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);}
	if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);}
	//-------------------------------------------------------
	//1, prepare buffers to store intermediate results
	//-------------------------------------------------------
	// NOTE(review): h_keyval_arr_arr / d_keyval_arr_arr are initialised but
	// never consumed (the host->device copy is disabled); kept for behavioural
	// parity, but both are now released below instead of being leaked.
	keyval_arr_t *h_keyval_arr_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
	keyval_arr_t *d_keyval_arr_arr;
	checkCudaErrors(cudaMalloc((void**)&(d_keyval_arr_arr),d_g_state->num_input_record*sizeof(keyval_arr_t)));
	for (int i=0; i<d_g_state->num_input_record;i++){
		h_keyval_arr_arr[i].arr = NULL;
		h_keyval_arr_arr[i].arr_len = 0;
	}//for
	// per-record array of device pointers to intermediate keyval arrays
	keyval_arr_t **d_keyval_arr_arr_p;
	checkCudaErrors(cudaMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*)));
	d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p;
	// per-record counters of intermediate key/value pairs, zero-initialised
	int *count = NULL;
	checkCudaErrors(cudaMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int)));
	d_g_state->d_intermediate_keyval_total_count = count;
	checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
	//--------------------------------------------------
	//2, launch the map partitioner kernel
	//--------------------------------------------------
	cudaThreadSynchronize();
	int num_blocks = (d_g_state->num_mappers+(NUM_THREADS)-1)/(NUM_THREADS);
	DoLog("GPU_ID:[%d] NUM_BLOCKS:%d NUM_THREADS:%d to run GPUMapPartitioner",d_g_state->gpu_id, num_blocks, NUM_THREADS);
	GPUMapPartitioner<<<num_blocks,NUM_THREADS>>>(*d_g_state);
	cudaThreadSynchronize();
	// BUGFIX: these scratch buffers were previously leaked on every invocation.
	free(h_keyval_arr_arr);
	checkCudaErrors(cudaFree(d_keyval_arr_arr));
	DoLog("GPU_ID:[%d] DONE",d_g_state->gpu_id);
	return 0;
}//int
//--------------------------------------------------
//start_row_id map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys'buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start_row_id map
//8, free allocated memory
//--------------------------------------------------
/*
int start_row_idMap(Spec_t* spec, gpu_context *d_g_state)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
return 0;
}//return 0;
*/
// Intended teardown hook for a gpu_context.
// NOTE(review): currently an empty stub -- device/host buffers allocated during
// the map/reduce phases are not released here; confirm whether cleanup happens
// elsewhere or was simply never implemented.
void DestroyDGlobalState(gpu_context * d_g_state){
}//void
// Shuffle the intermediate key/value pairs produced by the GPU map phase.
// Logs the owning GPU id, then delegates all actual work to Shuffle4GPUOutput.
void StartGPUShuffle(gpu_context * state){
	DoLog("GPU_ID:[%d] GPU Shuffle", state->gpu_id);
	Shuffle4GPUOutput(state);
}//void
// Pthread entry point for one CPU map worker.
// ptr points at a panda_cpu_task_info_t describing the
// [start_row_id_idx, end_idx) slice of input records this thread owns; each
// record is handed to the user-supplied cpu_map callback. Always returns NULL.
void *RunPandaCPUMapThread(void *ptr){
	panda_cpu_task_info_t *task = (panda_cpu_task_info_t *)ptr;
	cpu_context *cpu_state = (cpu_context *)(task->d_g_state);
	job_configuration *job_conf = (job_configuration *)(task->cpu_job_conf);
	for (int i = task->start_row_id_idx; i < task->end_idx; i++){
		keyval_t *kv = &(job_conf->input_keyval_arr[i]);
		cpu_map(kv->key, kv->val, kv->keySize, kv->valSize, cpu_state, i);
	}
	DoLog("DONE :%d tasks current intermediate len:%d",task->end_idx - task->start_row_id_idx, cpu_state->intermediate_keyval_arr_arr_p[0].arr_len);
	return NULL;
}
//Use Pthread to process Panda_Reduce
// Pthread entry point for the reduce phase of one worker thread.
// ptr is a thread_info_t; device_type selects the backend:
//   GPU_ACC: bind this thread to a CUDA device and run StartGPUReduce.
//   CPU_ACC: iterate the sorted key groups and call the user's cpu_reduce.
// Always returns NULL.
// NOTE(review): num_gpus is hard-coded to 1; the commented-out
// thread_info->num_gpus suggests multi-GPU support was intended -- confirm.
void * Panda_Reduce(void *ptr){
thread_info_t *thread_info = (thread_info_t *)ptr;
if(thread_info->device_type == GPU_ACC){
int tid = thread_info->tid;
int num_gpus = 1;//thread_info->num_gpus;
if (num_gpus == 0){
DoLog("thread_info->num_gpus == 0 return");
return NULL;
}//if
cudaSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
int gpu_id;
cudaGetDevice(&gpu_id);
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
DoLog( "start_row_id reduce tasks on GPU:%d tid:%d",gpu_id, tid);
StartGPUReduce(d_g_state);
}//if
if(thread_info->device_type == CPU_ACC){
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
DoLog("Start CPU Reduce Tasks");
if (d_g_state->num_cpus_cores == 0){
DoLog("d_g_state->num_cpus == 0 return");
return NULL;
}
//StartCPUReduce(d_g_state);
//panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr;
//cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state);
//DoLog("panda_cpu_task_info_t start_row_id_idx:%d end_idx:%d",panda_cpu_task_info->start_row_id_idx, panda_cpu_task_info->end_idx);
// one cpu_reduce call per sorted key group, executed serially on this thread
for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){
keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx]));
cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state);
}//for
DoLog("DONE");
}//if
//cudaFree(d_filebuf);
//handle the buffer different
//free(h_filebuf);
//handle the buffer different
return NULL;
}//void
//--------------------------------------------------------
//get a value from value list of the same key
//
//param : vals
//param : interOffsetSizes
//param : index
//return: the wanted value
//--------------------------------------------------------
// Return a pointer to the keyIndex-th value in the packed vals buffer.
// Uses interOffsetSizes[valStartIndex].w as the stride between values.
// NOTE(review): this assumes every value in the group has the same size
// (offset.w) -- only the entry at valStartIndex is consulted; TODO confirm.
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)vals + keyIndex * offset.w);
}
// Return a pointer to the keyIndex-th key in the packed key buffer.
// Uses interOffsetSizes[valStartIndex].y as the stride between keys.
// NOTE(review): assumes uniform key size across the group -- TODO confirm.
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)key + keyIndex * offset.y);
}
//-------------------------------------------------------
//Reducer
//-------------------------------------------------------
//-------------------------------------------------------
// ReducePartitioner: device-side reduce driver.
// Each thread processes a strided slice of the sorted key groups; for every
// group it gathers the group's values into a temporary val_t array (in-kernel
// malloc) and invokes the user-supplied gpu_reduce on (key, values).
// Group i spans [pos[i-1], pos[i]) in the sorted keyval position arrays,
// with group 0 starting at 0.
//-------------------------------------------------------
__global__ void ReducePartitioner(gpu_context d_g_state)
{
	int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
	int block_start_row_id_idx = num_records_per_thread*blockIdx.x*blockDim.x;
	int thread_start_row_id_idx = block_start_row_id_idx
		+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
		+ (threadIdx.x%STRIDE);
	int thread_end_idx = thread_start_row_id_idx+num_records_per_thread*STRIDE;
	if(thread_end_idx>d_g_state.d_sorted_keyvals_arr_len)
		thread_end_idx = d_g_state.d_sorted_keyvals_arr_len;
	int start_row_id, end;
	for(int reduce_task_idx=thread_start_row_id_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){
		// [start_row_id, end) bounds of this key group in the sorted arrays
		if (reduce_task_idx==0)
			start_row_id = 0;
		else
			start_row_id = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1];
		end = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx];
		// scratch array holding (val pointer, size) for every value of the key;
		// NOTE(review): in-kernel malloc may return NULL when the device heap
		// is exhausted -- the original code did not check either.
		val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end-start_row_id));
		int keySize = d_g_state.d_keyval_pos_arr[start_row_id].keySize;
		int keyPos = d_g_state.d_keyval_pos_arr[start_row_id].keyPos;
		void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos;
		for (int index = start_row_id;index<end;index++){
			int valSize = d_g_state.d_keyval_pos_arr[index].valSize;
			int valPos = d_g_state.d_keyval_pos_arr[index].valPos;
			val_t_arr[index-start_row_id].valSize = valSize;
			val_t_arr[index-start_row_id].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos;
		} //for
		gpu_reduce(key, val_t_arr, keySize, end-start_row_id, d_g_state);
		// BUGFIX: release the per-group scratch buffer; it was previously leaked
		// on every reduce task, exhausting the device malloc heap on large inputs.
		free(val_t_arr);
	}//for
}
//----------------------------------------------
//start_row_id reduce
//
//1, if there is not a reduce phase, just return
// then user uses spec->interKeys/spec->intervals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys'buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start_row_id reduce
//9, copy output data to Spect_t structure
//10,free allocated memory
//----------------------------------------------
//----------------------------------------------
// StartGPUReduce: allocate the reduced key/value output array
// (one slot per sorted key group) and launch ReducePartitioner
// over d_g_state->num_reducers logical threads.
//----------------------------------------------
void StartGPUReduce(gpu_context *d_g_state)
{
	// BUGFIX: cudaThreadSynchronize() is deprecated (removed in recent
	// toolkits); use cudaDeviceSynchronize() instead.
	checkCudaErrors(cudaDeviceSynchronize());
	d_g_state->d_reduced_keyval_arr_len = d_g_state->d_sorted_keyvals_arr_len;
	checkCudaErrors(cudaMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*d_g_state->d_reduced_keyval_arr_len));
	DoLog("number of reduce tasks:%d",d_g_state->d_sorted_keyvals_arr_len);
	int num_blocks = (d_g_state->num_reducers+(NUM_THREADS)-1)/(NUM_THREADS);
	DoLog("num_blocks:%d NUM_THREADS:%d",num_blocks,NUM_THREADS);
	ReducePartitioner<<<num_blocks,NUM_THREADS>>>(*d_g_state);
	// surface launch-configuration errors, then wait for kernel completion
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaDeviceSynchronize());
	DoLog("DONE");
}//void
/*void *Panda_CPU_Map(void *ptr){
return NULL;
}*/
//--------------------------------------------------
// Panda_Map: pthread entry point for the map phase of one worker thread.
// ptr is a thread_info_t; device_type selects the backend:
//   GPU_ACC: init the device, copy input to GPU memory, run the map kernel,
//            then shuffle the intermediate results on the GPU.
//   CPU_ACC: init the CPU group, run the pthread map tasks, then shuffle
//            on the host.
// Always returns NULL.
//--------------------------------------------------
void* Panda_Map(void *ptr){
//DoLog("panda_map");
thread_info_t *thread_info = (thread_info_t *)ptr;
if(thread_info->device_type == GPU_ACC){
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
//DoLog("gpu_id in current Panda_Map:%d",gpu_id);
InitGPUDevice(thread_info);
DoLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id);
InitGPUMapReduce3(d_g_state);
//printData2<<<1,1>>>(*d_g_state);
DoLog("GPU_ID:[%d] Start GPU Map Tasks",d_g_state->gpu_id);
StartGPUMap(d_g_state);
//d_g_state->d_intermediate_keyval_total_count;
//checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
StartGPUShuffle(d_g_state);
}//if
if(thread_info->device_type == CPU_ACC){
//DoLog("CPU_ACC");
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
DoLog("CPU_GROUP_ID:[%d] Init CPU Device",d_g_state->cpu_group_id);
InitCPUDevice(thread_info);
//DoLog("Init CPU MapReduce");
InitCPUMapReduce2(thread_info);
DoLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id);
StartCPUMap2(thread_info);
//d_g_state->d_intermediate_keyval_total_count;
//checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
StartCPUShuffle2(thread_info);
}
return NULL;
}//FinishMapReduce2(d_g_state);
// Final stage of the legacy (Mars-style Spec_t) pipeline; currently only logs.
// NOTE(review): spec is unused -- presumably kept for interface compatibility.
void FinishMapReduce(Spec_t* spec)
{
DoLog( "=====finish map/reduce=====");
}//void
// FinishMapReduce2: log the free device memory at the end of the job.
// NOTE(review): state is unused; cudaMemGetInfo queries the current device.
void FinishMapReduce2(gpu_context* state)
{
	size_t total_mem, avail_mem;
	checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
	// BUGFIX: avail_mem is size_t; "%d" truncated/misread it on LP64 builds.
	// Also dropped the unused heap_limit local.
	DoLog("avail_mem:%lu", (unsigned long)avail_mem);
}//void
#endif //__PANDALIB_CU__ |
ca8342d410db12ece88928218273348c8a995c74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Map an output cell index to the start offset of its pooling window along
// one axis. sample is the per-(batch, plane) random offset for this axis;
// consecutive indices produce windows whose starts advance by roughly
// (inputSize - poolSize) / (outputSize - 1).
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
  accscalar_t sample,
  int64_t index,
  int64_t inputSize,
  int64_t outputSize,
  int64_t poolSize) {
  // The last output cell is pinned so its window ends exactly at the
  // input edge.
  if (index == outputSize - 1) {
    return inputSize - poolSize;
  }
  const accscalar_t scale = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  return static_cast<int64_t>((index + sample) * scale) -
      static_cast<int64_t>(sample * scale);
}
// Forward kernel for 3-D fractional max pooling.
// Launch layout (see the host-side launcher): blockIdx.z = batch index,
// blockIdx.y = plane/channel index, and threadIdx.x + blockIdx.x * blockDim.x
// enumerates the flattened (t, h, w) output points of one plane.
// samples[batch][plane][0..2] are the per-(batch, plane) random offsets used
// by get_intervals to place the pooling windows along T/H/W.
// Writes the window max into output and the flattened within-plane input
// index (t * inH * inW + h * inW + w) of that max into indices.
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
// un-flatten the linear output point into (outputT, outputH, outputW)
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
// start coordinates of this output cell's pooling window in the input
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
// running max, seeded with the lowest representable value; maxIndex starts
// at the window's first element
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
// Two equivalent inner loops over W; the counted form for widths 2..7
// presumably helps the compiler unroll -- TODO confirm intent.
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
// Backward kernel for 3-D fractional max pooling.
// Same launch layout as the forward kernel: blockIdx.z = batch,
// blockIdx.y = plane, threadIdx.x + blockIdx.x * blockDim.x enumerates the
// flattened (t, h, w) gradOutput points. Each thread reads the saved argmax
// index of its output cell and atomically accumulates the matching gradOutput
// value into gradInput (atomic because multiple output cells may map to the
// same input element).
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
// Output (h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
// un-flatten the saved within-plane input index back into (t, h, w)
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
// Host-side driver for the fractional max pool 3-D forward pass (HIP build).
// Validates pool/output sizes and input dimensionality, resizes output and
// indices to ([N,] C, outT, outH, outW), views 4-D inputs as a batch of one,
// and launches fractional_max_pool3d_out_frame with one z-block per batch,
// one y-block per plane and 128-thread x-blocks over the output points.
void fractional_max_pool3d_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
TORCH_CHECK(pool_size.size() == 3,
"fractional_max_pool3d: kernel_size must either be a single Int or tuple of three Ints")
TORCH_CHECK(output_size.size() == 3,
"fractional_max_pool3d: output_size must either be a single Int or tuple of three Ints")
// dimension bookkeeping: shifted by one below when a batch dim is present
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t numBatch = 1;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t poolSizeT = pool_size[0];
int64_t poolSizeH = pool_size[1];
int64_t poolSizeW = pool_size[2];
int64_t ndims = input.ndimension();
TORCH_CHECK(
ndims == 4 || ndims == 5,
"fractional_max_pool3d_out_cuda_template(): ",
"Expected 4D or 5D tensor, but got: ", input.sizes());
for (int64_t i = 1; i < ndims; ++i) {
TORCH_CHECK(input.size(i) > 0,
"fractional_max_pool3d_out_cuda_template(): ",
"Expected input to have non-zero size for non-batch dimensions, but got",
input.sizes(), " with dimension ", i, " being empty.");
}
if (ndims == 5) {
numBatch = input.size(0);
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
// every pooling window must fit inside the input on each axis
TORCH_CHECK(
outputT + poolSizeT - 1 < inputT,
"fractional_max_pool3d_out_cuda_template(): ",
"pool time (", poolSizeT, ") too large relative to input time (",
inputT, ")");
TORCH_CHECK(
outputH + poolSizeH - 1 < inputH,
"fractional_max_pool3d_out_cuda_template(): ",
"pool height (", poolSizeH, ") too large relative to input height (",
inputH, ")");
TORCH_CHECK(
outputW + poolSizeW - 1 < inputW,
"fractional_max_pool3d_out_cuda_template(): ",
"pool width (", poolSizeW, ") too large relative to input width (",
inputW, ")");
if (ndims == 4) {
/* resize output */
output.resize_({numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputT, outputH, outputW});
} else {
/* resize output */
output.resize_({numBatch, numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numBatch, numPlanes, outputT, outputH, outputW});
}
// the kernel always works on 5-D (N, C, T, H, W) views
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
// dispatch on dtype and launch on the current stream; one thread per
// output point
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
hipLaunchKernelGGL(( fractional_max_pool3d_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
// Host-side driver for the fractional max pool 3-D backward pass (HIP build).
// Checks that gradOutput matches the forward output shape, zero-fills
// gradInput (resized to input's shape), views 4-D tensors as a batch of one,
// and launches fractional_max_pool3d_backward_out_frame with the same
// grid/block layout as the forward pass.
void fractional_max_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices) {
// dimension bookkeeping: shifted by one below when a batch dim is present
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t ndims = input.ndimension();
if (ndims == 5) {
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
// gradOutput must match the forward pass's output extent on every axis
TORCH_CHECK(
outputT == gradOutput.size(dimt),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput time unexpected"
);
TORCH_CHECK(
outputH == gradOutput.size(dimh),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput height unexpected"
);
TORCH_CHECK(
outputW == gradOutput.size(dimw),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput width unexpected"
);
/* resize */
// zero-fill so the kernel can accumulate into gradInput with atomics
gradInput.resize_as_(input);
gradInput.zero_();
// the kernel always works on 5-D (N, C, T, H, W) views
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 4) {
gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
outputH, outputW});
indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
outputW});
}
if (gradInput.numel() == 0) {
return;
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3) * gradOutput_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
// dispatch on dtype and launch on the current stream
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
gradOutput.scalar_type(),
"fractional_max_pool3d_backward_out_frame",
[&] {
hipLaunchKernelGGL(( fractional_max_pool3d_backward_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_.packed_accessor64<scalar_t, 5>(),
gradOutput_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>()
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
}// namespace
// out-variant entry point: forwards to the shared forward template and
// returns the caller-supplied output/indices tensors.
std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& randomSamples,
at::Tensor& output,
at::Tensor& indices) {
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tie(output, indices);
}
// Allocating entry point: creates fresh output/indices tensors and runs the
// shared forward template on them.
std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda(
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& randomSamples) {
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// out-variant backward entry point: forwards to the shared backward template.
Tensor& fractional_max_pool3d_backward_out_cuda(const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices,
at::Tensor& gradInput) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because the backward kernel accumulates with atomicAdd.
  globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda");
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Allocating backward entry point: creates gradInput and runs the shared
// backward template on it.
Tensor fractional_max_pool3d_backward_cuda(
const at::Tensor& gradOutput,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because the backward kernel accumulates with atomicAdd.
  globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda");
  auto gradInput = at::empty({0}, input.options());
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput, input, pool_size, output_size, indices);
  return gradInput;
}
}// native
}// at
| ca8342d410db12ece88928218273348c8a995c74.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Map an output cell index to the start offset of its pooling window along
// one axis. sample is the per-(batch, plane) random offset for this axis;
// consecutive indices produce windows whose starts advance by roughly
// (inputSize - poolSize) / (outputSize - 1).
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
  accscalar_t sample,
  int64_t index,
  int64_t inputSize,
  int64_t outputSize,
  int64_t poolSize) {
  // The last output cell is pinned so its window ends exactly at the
  // input edge.
  if (index == outputSize - 1) {
    return inputSize - poolSize;
  }
  const accscalar_t scale = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  return static_cast<int64_t>((index + sample) * scale) -
      static_cast<int64_t>(sample * scale);
}
// Forward kernel for 3-D fractional max pooling.
// Launch layout (see the host-side launcher): blockIdx.z = batch index,
// blockIdx.y = plane/channel index, and threadIdx.x + blockIdx.x * blockDim.x
// enumerates the flattened (t, h, w) output points of one plane.
// samples[batch][plane][0..2] are the per-(batch, plane) random offsets used
// by get_intervals to place the pooling windows along T/H/W.
// Writes the window max into output and the flattened within-plane input
// index (t * inH * inW + h * inW + w) of that max into indices.
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
// un-flatten the linear output point into (outputT, outputH, outputW)
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
// start coordinates of this output cell's pooling window in the input
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
// running max, seeded with the lowest representable value; maxIndex starts
// at the window's first element
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
// Two equivalent inner loops over W; the counted form for widths 2..7
// presumably helps the compiler unroll -- TODO confirm intent.
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
// Backward kernel for 3-D fractional max pooling.
// Same launch layout as the forward kernel: blockIdx.z = batch,
// blockIdx.y = plane, threadIdx.x + blockIdx.x * blockDim.x enumerates the
// flattened (t, h, w) gradOutput points. Each thread reads the saved argmax
// index of its output cell and atomically accumulates the matching gradOutput
// value into gradInput (atomic because multiple output cells may map to the
// same input element).
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
// Output (h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
// un-flatten the saved within-plane input index back into (t, h, w)
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
// Host-side driver for the fractional max pool 3-D forward pass.
// Validates pool/output sizes and input dimensionality, resizes output and
// indices to ([N,] C, outT, outH, outW), views 4-D inputs as a batch of one,
// and launches fractional_max_pool3d_out_frame with one z-block per batch,
// one y-block per plane and 128-thread x-blocks over the output points.
void fractional_max_pool3d_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
TORCH_CHECK(pool_size.size() == 3,
"fractional_max_pool3d: kernel_size must either be a single Int or tuple of three Ints")
TORCH_CHECK(output_size.size() == 3,
"fractional_max_pool3d: output_size must either be a single Int or tuple of three Ints")
// dimension bookkeeping: shifted by one below when a batch dim is present
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t numBatch = 1;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t poolSizeT = pool_size[0];
int64_t poolSizeH = pool_size[1];
int64_t poolSizeW = pool_size[2];
int64_t ndims = input.ndimension();
TORCH_CHECK(
ndims == 4 || ndims == 5,
"fractional_max_pool3d_out_cuda_template(): ",
"Expected 4D or 5D tensor, but got: ", input.sizes());
for (int64_t i = 1; i < ndims; ++i) {
TORCH_CHECK(input.size(i) > 0,
"fractional_max_pool3d_out_cuda_template(): ",
"Expected input to have non-zero size for non-batch dimensions, but got",
input.sizes(), " with dimension ", i, " being empty.");
}
if (ndims == 5) {
numBatch = input.size(0);
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
// every pooling window must fit inside the input on each axis
TORCH_CHECK(
outputT + poolSizeT - 1 < inputT,
"fractional_max_pool3d_out_cuda_template(): ",
"pool time (", poolSizeT, ") too large relative to input time (",
inputT, ")");
TORCH_CHECK(
outputH + poolSizeH - 1 < inputH,
"fractional_max_pool3d_out_cuda_template(): ",
"pool height (", poolSizeH, ") too large relative to input height (",
inputH, ")");
TORCH_CHECK(
outputW + poolSizeW - 1 < inputW,
"fractional_max_pool3d_out_cuda_template(): ",
"pool width (", poolSizeW, ") too large relative to input width (",
inputW, ")");
if (ndims == 4) {
/* resize output */
output.resize_({numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputT, outputH, outputW});
} else {
/* resize output */
output.resize_({numBatch, numPlanes, outputT, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numBatch, numPlanes, outputT, outputH, outputW});
}
// the kernel always works on 5-D (N, C, T, H, W) views
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
// dispatch on dtype and launch on the current stream; one thread per
// output point
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
fractional_max_pool3d_out_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
// Shared implementation for the fractional max pool3d backward pass.
// Scatters gradOutput back into gradInput at the positions recorded in
// `indices` by the forward pass, for both 4D (C,T,H,W) and 5D (N,C,T,H,W)
// inputs. pool_size is accepted for signature parity but unused here.
void fractional_max_pool3d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef pool_size /* unused */,
  IntArrayRef output_size,
  const Tensor& indices) {
  // Dimension indices for the non-batched (4D) layout; shifted by one below
  // when a leading batch dimension is present.
  int64_t dimt = 1;
  int64_t dimh = 2;
  int64_t dimw = 3;
  int64_t outputT = output_size[0];
  int64_t outputH = output_size[1];
  int64_t outputW = output_size[2];
  int64_t ndims = input.ndimension();
  if (ndims == 5) {
    dimt++;
    dimh++;
    dimw++;
  }
  /* sizes */
  int64_t inputT = input.size(dimt);
  int64_t inputH = input.size(dimh);
  int64_t inputW = input.size(dimw);
  // gradOutput must match the forward output_size in all three spatial dims.
  TORCH_CHECK(
    outputT == gradOutput.size(dimt),
    "fractional_max_pool3d_backward_out_cuda_template(): ",
    "gradOutput time unexpected"
  );
  TORCH_CHECK(
    outputH == gradOutput.size(dimh),
    "fractional_max_pool3d_backward_out_cuda_template(): ",
    "gradOutput height unexpected"
  );
  TORCH_CHECK(
    outputW == gradOutput.size(dimw),
    "fractional_max_pool3d_backward_out_cuda_template(): ",
    "gradOutput width unexpected"
  );
  /* resize */
  // gradInput is accumulated into, so it must start zeroed.
  gradInput.resize_as_(input);
  gradInput.zero_();
  // Normalize everything to a 5D view so the kernel only handles one layout.
  auto gradInput_ = gradInput;
  auto gradOutput_ = gradOutput;
  auto indices_ = indices;
  if(ndims == 4) {
    gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
                                     inputH, inputW});
    gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
                                       outputH, outputW});
    indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
                                 outputW});
  }
  // Nothing to do for empty tensors; gradInput is already zeroed above.
  if (gradInput.numel() == 0) {
    return;
  }
  /* backprop */
  // block is limited to 4 warps (128 threads)
  // grid handles overflow per each plane: x covers the output plane in
  // 128-element chunks, y is the plane (channel) index, z is the batch index.
  int64_t outputPlaneSize = gradOutput_.size(2) *
    gradOutput_.size(3) * gradOutput_.size(4);
  dim3 grid(
    (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
    gradInput_.size(1),
    gradInput_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    gradOutput.scalar_type(),
    "fractional_max_pool3d_backward_out_frame",
    [&] {
      // Kernel is defined elsewhere in this file; it reads indices_ to route
      // each gradOutput element back to its argmax input location.
      fractional_max_pool3d_backward_out_frame<scalar_t>
        <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
          gradInput_.packed_accessor64<scalar_t, 5>(),
          gradOutput_.packed_accessor64<scalar_t, 5>(),
          indices_.packed_accessor64<int64_t, 5>()
        );
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  );
}
}// namespace
// Out-variant entry point: forwards to the shared template implementation
// and returns the caller-provided output/indices tensors by reference.
std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples,
  at::Tensor& output,
  at::Tensor& indices) {
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tie(output, indices);
}
// Functional entry point: allocates fresh result tensors (values with the
// input's dtype/device, indices as int64) and delegates to the out-variant
// implementation, which resizes them to the proper shape.
std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples) {
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool3d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(std::move(output), std::move(indices));
}
// Out-variant backward entry point.
// See Note [Writing Nondeterministic Operations]
// The backward kernel accumulates with atomicAdd, so the floating-point
// summation order (and therefore the result) is nondeterministic.
Tensor& fractional_max_pool3d_backward_out_cuda(const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices,
  at::Tensor& gradInput) {
  globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda");
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Functional backward entry point: allocates gradInput and delegates to the
// shared implementation (which resizes it to match `input`).
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because the backward kernel accumulates via atomicAdd.
Tensor fractional_max_pool3d_backward_cuda(
  const at::Tensor& gradOutput,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices) {
  globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda");
  auto gradInput = at::empty({0}, input.options());
  fractional_max_pool3d_backward_out_cuda_template(
      gradInput, gradOutput, input, pool_size, output_size, indices);
  return gradInput;
}
}// native
}// at
|
6a4bbf81f631f99e9379ba596dbcc0e9bfd6c04c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgesellcmmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
// 1D SELLC SpMV kernel (alignment == 1): one thread per matrix row.
// Computes dy = alpha * A * dx (+ beta * dy when !betazero) for a SELLP
// matrix. Entries of a slice are stored column-major, blocksize rows wide.
// NOTE(review): `row = bdx * 256 + idx` hard-codes 256 threads per block;
// this matches the host launcher, which sets num_tx = 256 for alignment 1.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    const double * __restrict__ dval,
    const magma_index_t * __restrict__ dcolind,
    const magma_index_t * __restrict__ drowptr,
    const double *__restrict__ dx,
    double beta,
    double * __restrict__ dy)
{
    // threads assigned to rows
    //int Idx = blockDim.x * blockIdx.x + threadIdx.x;
    //int offset = drowptr[ blockIdx.x ];
    //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
    // T threads assigned to each row
    int idx = threadIdx.x;                          // local row
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * 256 + idx;                      // global row index
    // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
    int lrow = threadIdx.x%blocksize; // local row;
    if( row < num_rows ) {
        // Slice boundaries for this row; border = entries per row in slice.
        int offset = drowptr[ row/blocksize ];
        int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int n = 0; n < border; n++) {
            int col = dcolind [ offset+ blocksize * n + lrow ];
            double val = dval[ offset+ blocksize * n + lrow ];
            dot = dot + val * dx [ col ];
        }
        if (betazero) {
            // beta == 0: skip the read of dy (also avoids NaN propagation).
            dy[ row ] = dot * alpha;
        } else {
            dy[ row ] = dot * alpha + beta * dy [ row ];
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// 2D SELLP SpMV kernel for T == 4 threads per row. The inner product is
// accumulated with a 2x-unrolled loop, partial sums are staged in dynamic
// shared memory (blocksize*T doubles, allocated by the host launcher), then
// reduced across the T dimension.
// NOTE(review): the __syncthreads() calls inside `if(row < num_rows)` /
// `if(idx < 2)` are reached only by a subset of threads — this relies on
// the tail block converging and is UB under independent thread scheduling;
// preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * dx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles; loop unrolled by two
        int kk, i1, i2;
        double x1, x2, v1, v2;
        dcolind += offset + ldx;
        dval += offset + ldx;
        for ( kk = 0; kk < max_-1; kk+=2 ) {
            i1 = dcolind[ block*kk];
            i2 = dcolind[ block*kk + block];
            x1 = dx[ i1 ];
            x2 = dx[ i2 ];
            v1 = dval[ block*kk ];
            v2 = dval[ block*kk + block];
            dot += v1 * x1;
            dot += v2 * x2;
        }
        // remainder iteration when max_ is odd
        if (kk<max_) {
            x1 = dx[ dcolind[ block*kk] ];
            v1 = dval[ block*kk ];
            dot += v1 * x1;
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=4 partial sums for this row
        if( idx < 2 ) {
            shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// 2D SELLP SpMV kernel for T == 8 threads per row. Identical structure to
// the T == 4 variant (2x-unrolled dot product, shared-memory staging) with
// one extra level in the reduction tree.
// NOTE(review): barriers inside divergent branches — see the T == 4 kernel;
// preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * dx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles; loop unrolled by two
        int kk, i1, i2;
        double x1, x2, v1, v2;
        dcolind += offset + ldx;
        dval += offset + ldx;
        for ( kk = 0; kk < max_-1; kk+=2 ) {
            i1 = dcolind[ block*kk];
            i2 = dcolind[ block*kk + block];
            x1 = dx[ i1 ];
            x2 = dx[ i2 ];
            v1 = dval[ block*kk ];
            v2 = dval[ block*kk + block];
            dot += v1 * x1;
            dot += v2 * x2;
        }
        // remainder iteration when max_ is odd
        if (kk<max_) {
            x1 = dx[ dcolind[ block*kk] ];
            v1 = dval[ block*kk ];
            dot += v1 * x1;
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=8 partial sums for this row
        if( idx < 4 ) {
            shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// 2D SELLP SpMV kernel for T == 16 threads per row. Uses a plain (not
// unrolled) accumulation loop, then a four-level shared-memory reduction.
// NOTE(review): barriers inside divergent branches — see the T == 4 kernel;
// preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * dx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=16 partial sums for this row
        if( idx < 8 ) {
            shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// 2D SELLP SpMV kernel for T == 32 threads per row. Same structure as the
// T == 16 variant with one extra reduction level.
// NOTE(review): barriers inside divergent branches — see the T == 4 kernel;
// preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * dx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * dx[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=32 partial sums for this row
        if( idx < 16 ) {
            shared[ldx]+=shared[ldx+blocksize*16];
            __syncthreads();
            if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
// Fetch element i of the double vector bound to the texture object. The
// texture channel is declared as int2 (two 32-bit halves per double); the
// halves are reassembled with __hiloint2double (y = high word, x = low word).
__inline__ __device__ double
read_from_tex( hipTextureObject_t texdx, const int& i) {
    int2 temp = tex1Dfetch<int2>( texdx, i );
    return __hiloint2double(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture variant of the T == 4 SELLP SpMV kernel: identical to
// zgesellptmv2d_kernel_4 except that dx is read through a texture object
// (see read_from_tex) instead of a plain global pointer.
// NOTE(review): barriers inside divergent branches — see the non-texture
// T == 4 kernel; preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles; loop unrolled by two
        int kk, i1, i2;
        double x1, x2, v1, v2;
        dcolind += offset + ldx;
        dval += offset + ldx;
        for ( kk = 0; kk < max_-1; kk+=2 ) {
            i1 = dcolind[ block*kk];
            i2 = dcolind[ block*kk + block];
            x1 = read_from_tex( texdx, i1 );
            x2 = read_from_tex( texdx, i2 );
            v1 = dval[ block*kk ];
            v2 = dval[ block*kk + block];
            dot += v1 * x1;
            dot += v2 * x2;
        }
        // remainder iteration when max_ is odd
        if (kk<max_) {
            x1 = read_from_tex( texdx, dcolind[ block*kk] );
            v1 = dval[ block*kk ];
            dot += v1 * x1;
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=4 partial sums for this row
        if( idx < 2 ) {
            shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture variant of the T == 8 SELLP SpMV kernel: identical to
// zgesellptmv2d_kernel_8 except that dx is read through a texture object.
// NOTE(review): barriers inside divergent branches — see the non-texture
// T == 4 kernel; preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles; loop unrolled by two
        int kk, i1, i2;
        double x1, x2, v1, v2;
        dcolind += offset + ldx;
        dval += offset + ldx;
        for ( kk = 0; kk < max_-1; kk+=2 ) {
            i1 = dcolind[ block*kk];
            i2 = dcolind[ block*kk + block];
            x1 = read_from_tex( texdx, i1 );
            x2 = read_from_tex( texdx, i2 );
            v1 = dval[ block*kk ];
            v2 = dval[ block*kk + block];
            dot += v1 * x1;
            dot += v2 * x2;
        }
        // remainder iteration when max_ is odd
        if (kk<max_) {
            x1 = read_from_tex( texdx, dcolind[ block*kk] );
            v1 = dval[ block*kk ];
            dot += v1 * x1;
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=8 partial sums for this row
        if( idx < 4 ) {
            shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture variant of the T == 16 SELLP SpMV kernel: identical to
// zgesellptmv2d_kernel_16 except that dx is read through a texture object.
// NOTE(review): barriers inside divergent branches — see the non-texture
// T == 4 kernel; preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * read_from_tex( texdx, col );
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=16 partial sums for this row
        if( idx < 8 ) {
            shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Texture variant of the T == 32 SELLP SpMV kernel: identical to
// zgesellptmv2d_kernel_32 except that dx is read through a texture object.
// NOTE(review): barriers inside divergent branches — see the non-texture
// T == 4 kernel; preserved as-is to match the original behavior.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    hipTextureObject_t texdx,
    double beta,
    double * dy)
{
    // T threads assigned to each row
    int idx = threadIdx.y;  // thread in row
    int idy = threadIdx.x;  // local row
    int ldx = idx * blocksize + idy;                // slot in shared memory
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ double shared[];
    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * read_from_tex( texdx, col );
        }
        shared[ldx] = dot;
        __syncthreads();
        // tree reduction over the T=32 partial sums for this row
        if( idx < 16 ) {
            shared[ldx]+=shared[ldx+blocksize*16];
            __syncthreads();
            if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                if (betazero) {
                    dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
                } else {
                    dy[row] =
                    (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
                }
            }
        }
    }
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
// Host-side dispatcher for the SELLP SpMV kernels above. Selects the kernel
// by `alignment` (threads per row), sets up the launch geometry and dynamic
// shared memory, and (in the PRECISION_d && TEXTURE configuration) binds dx
// to a texture object for the duration of the call.
//
// Fixes relative to the generated original:
//  * texture path, alignment == 1, beta == 0: launched with the undeclared
//    identifier `grid2` instead of `grid` (compile error when TEXTURE is on).
//  * texture path error message passed a magma_int_t to %d; now cast to int,
//    matching the non-texture path.
//  * texture path leaked the texture object when returning
//    MAGMA_ERR_NOT_SUPPORTED; it is now destroyed before the early return.
extern "C" magma_int_t
magma_dgesellpmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t blocksize,
    magma_int_t slices,
    magma_int_t alignment,
    double alpha,
    magmaDouble_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowptr,
    magmaDouble_ptr dx,
    double beta,
    magmaDouble_ptr dy,
    magma_queue_t queue )
{
    // using a 2D thread grid: blocksize rows x alignment threads per row
    int num_threads = blocksize*alignment;
    magma_int_t arch = magma_getdevice_arch();
    // pre-Fermi devices cannot supply the required shared memory;
    // diagnostic only (original behavior: warn and continue)
    if ( arch < 200 && num_threads > 256 )
        printf("error: too much shared memory requested.\n");
    // decompose the slice count over an (x, y) grid; the kernels linearize
    // it via blockIdx.y * gridDim.x + blockIdx.x, so z stays 1
    int dimgrid1 = min( int( sqrt( double( slices ))), 65535 );
    int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
    int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
    int num_tx = blocksize;
    int Ms = num_threads * sizeof( double );  // dynamic shared memory bytes
    // special case: alignment 1 uses the 1D one-thread-per-row kernel
    if( alignment == 1 ){
        Ms = 0;
        num_tx = 256;
        // NOTE(review): block count is derived from n (columns) while the
        // kernel iterates rows up to m — TODO confirm against the original
        // zgesellcmmv.cu; preserved as-is
        int num_blocks = magma_ceildiv( n, 256 );
        dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 );
        dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
        dimgrid3 = 1;
        //blocksize = 256;
    }
    dim3 block( num_tx, alignment, 1);
    if( dimgrid3 > 65535 ){
        // diagnostic only (original behavior: warn and continue)
        printf("error: too many GPU thread blocks requested.\n");
    }
    dim3 grid( dimgrid1, dimgrid2, 1);
#if defined(PRECISION_d) && defined(TEXTURE)
    // Bind dx to a 1D texture: each double is fetched as an int2 and
    // reassembled in the kernel (see read_from_tex).
    // Create channel.
    hipChannelFormatDesc channel_desc;
    channel_desc =
        hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
    // Create resource descriptor.
    struct hipResourceDesc resDescdx;
    memset(&resDescdx, 0, sizeof(resDescdx));
    resDescdx.resType = hipResourceTypeLinear;
    resDescdx.res.linear.devPtr = (void*)dx;
    resDescdx.res.linear.desc = channel_desc;
    resDescdx.res.linear.sizeInBytes = m*sizeof(double);
    // Specify texture object parameters.
    struct hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = hipAddressModeClamp;
    texDesc.filterMode     = hipFilterModePoint;
    texDesc.readMode       = hipReadModeElementType;
    // Create texture object.
    hipTextureObject_t texdx = 0;
    hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
    hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
    if ( alignment == 1) {
        if (beta == MAGMA_D_ZERO) {
            // BUGFIX: was dim3(grid2) — an undeclared identifier
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    } else if ( alignment == 4){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        }
    }
    else if ( alignment == 8){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        }
    }
    else if ( alignment == 16){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        }
    }
    else if ( alignment == 32){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, texdx, beta, dy );
        }
    }
    else {
        // cast: magma_int_t may be wider than int; %d expects int
        printf("error: alignment %d not supported.\n", int(alignment) );
        // FIX: release the texture object before the early return (leak)
        hipDestroyTextureObject(texdx);
        return MAGMA_ERR_NOT_SUPPORTED;
    }
    hipDestroyTextureObject(texdx);
#else
    if ( alignment == 1) {
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    }
    else if ( alignment == 4){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    }
    else if ( alignment == 8){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    }
    else if ( alignment == 16){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    }
    else if ( alignment == 32){
        if (beta == MAGMA_D_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        } else {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
                m, n, blocksize, alignment, alpha,
                dval, dcolind, drowptr, dx, beta, dy );
        }
    }
    else {
        printf("error: alignment %d not supported.\n", int(alignment) );
        return MAGMA_ERR_NOT_SUPPORTED;
    }
#endif
    return MAGMA_SUCCESS;
}
| 6a4bbf81f631f99e9379ba596dbcc0e9bfd6c04c.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgesellcmmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
// 1D SELLC SpMV kernel (alignment == 1): one thread per matrix row.
// Computes dy = alpha * A * dx (+ beta * dy when !betazero) for a SELLP
// matrix. Entries of a slice are stored column-major, blocksize rows wide.
// NOTE(review): `row = bdx * 256 + idx` hard-codes 256 threads per block;
// this matches the host launcher, which sets num_tx = 256 for alignment 1.
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
    int num_rows,
    int num_cols,
    int blocksize,
    int T,
    double alpha,
    const double * __restrict__ dval,
    const magma_index_t * __restrict__ dcolind,
    const magma_index_t * __restrict__ drowptr,
    const double *__restrict__ dx,
    double beta,
    double * __restrict__ dy)
{
    // threads assigned to rows
    //int Idx = blockDim.x * blockIdx.x + threadIdx.x;
    //int offset = drowptr[ blockIdx.x ];
    //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
    // T threads assigned to each row
    int idx = threadIdx.x;                          // local row
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * 256 + idx;                      // global row index
    // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
    int lrow = threadIdx.x%blocksize; // local row;
    if( row < num_rows ) {
        // Slice boundaries for this row; border = entries per row in slice.
        int offset = drowptr[ row/blocksize ];
        int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int n = 0; n < border; n++) {
            int col = dcolind [ offset+ blocksize * n + lrow ];
            double val = dval[ offset+ blocksize * n + lrow ];
            dot = dot + val * dx [ col ];
        }
        if (betazero) {
            // beta == 0: skip the read of dy (also avoids NaN propagation).
            dy[ row ] = dot * alpha;
        } else {
            dy[ row ] = dot * alpha + beta * dy [ row ];
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
double x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
double x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ double
read_from_tex( cudaTextureObject_t texdx, const int& i) {
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2double(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
double x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
double x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
double beta,
double * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = min( int( sqrt( double( slices ))), 65535 );
int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
int num_tx = blocksize;
int Ms = num_threads * sizeof( double );
// special case: alignment 1:
if( alignment == 1 ){
Ms = 0;
num_tx = 256;
int num_blocks = magma_ceildiv( n, 256 );
dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 );
dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
dimgrid3 = 1;
//blocksize = 256;
}
dim3 block( num_tx, alignment, 1);
if( dimgrid3 > 65535 ){
printf("error: too many GPU thread blocks requested.\n");
}
dim3 grid( dimgrid1, dimgrid2, 1);
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc =
cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(double);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( alignment == 1) {
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_1<true><<< grid2, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
} else if ( alignment == 4){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_4_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_4_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_8_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_8_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_16_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_16_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_32_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_32_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
cudaDestroyTextureObject(texdx);
#else
if ( alignment == 1) {
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 4){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_4<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_4<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_8<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_8<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_16<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_16<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_D_ZERO) {
zgesellptmv2d_kernel_32<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_32<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
|
f4338b9af755e9ababe65a4890df86ad99748a49.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Program: Pearson Correlatrion Coefficient computation.
* Author: Andrea Purgato
* Version: integer input version.
*
* File: DeviceReader.cu
* Description: this file support the program with some functions that are related to the performance of the GPU devices.
*
*/
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "Logger.cpp"
/*
Struct hat contains the relevant GPU info.
*/
struct device{
char *name;
size_t globalMem;
int warpSize;
int maxThreadPerBlock;
int maxBlockSize[3];
int maxGridSize[3];
int cuncurrentKernels;
int registerPerBlock;
};
/*
Struct used to store the performance.
*/
struct gpuPerformance{
gpuPerformance() : millisecAvgVariance(0), millisecAvgCovariance(0), millisecAvgCorrelation(0), millisecTotVariance(0), millisecTotCovariance(0), millisecTotCorrelation(0){};
float millisecAvgVariance;
float millisecAvgCovariance;
float millisecAvgCorrelation;
float millisecTotVariance;
float millisecTotCovariance;
float millisecTotCorrelation;
float bwVariance;
float bwCovariance;
float bwCorrelation;
};
struct cpuPerformance{
cpuPerformance() : exeTime(0), exeSaving(0), waitingTime(0){};
int exeTime;
int exeSaving;
int waitingTime;
};
/*
Function that get back the number of devices.
*/
int getDeviceNumber() {
/// Query the number of devices.
int d;
hipGetDeviceCount(&d);
return d;
}
/*
Function that create the arrays of gpus properties.
*/
device* getDeviceProp() {
int n = getDeviceNumber();
device *d = (device*)malloc(n * sizeof(device));
for (int i = 0; i < n; i++) {
/// Query the properties.
hipDeviceProp_t p;
hipGetDeviceProperties(&p, i);
d[i].name = p.name;
d[i].globalMem = p.totalGlobalMem;
d[i].maxThreadPerBlock = p.maxThreadsPerBlock;
d[i].maxBlockSize[0] = p.maxThreadsDim[0];
d[i].maxBlockSize[1] = p.maxThreadsDim[1];
d[i].maxBlockSize[2] = p.maxThreadsDim[2];
d[i].maxGridSize[0] = p.maxGridSize[0];
d[i].maxGridSize[1] = p.maxGridSize[1];
d[i].maxGridSize[2] = p.maxGridSize[2];
d[i].cuncurrentKernels = p.concurrentKernels;
d[i].registerPerBlock = p.regsPerBlock;
d[i].warpSize = p.warpSize;
log("Device " + std::to_string(i) + " " + p.name);
//log("Device memory:" + std::to_string(p.totalGlobalMem / (1024 * 1024)) + " GB");
//log("Warp size: " + std::to_string(p.warpSize));
//log("Register per block: " + std::to_string(p.regsPerBlock));
//log("Register per multiprocesssor: " + std::to_string(p.regsPerMultiprocessor));
}
return d;
}
/*
Function that update the performance measure.
*/
void updatePerformance(gpuPerformance* perf, float millisec, int N, int stuff){
if (stuff == 1){
if (perf->millisecAvgVariance == 0)
perf->millisecAvgVariance = millisec;
else
perf->millisecAvgVariance = (perf->millisecAvgCovariance + millisec) / 2;
perf->millisecTotVariance = perf->millisecTotVariance + millisec;
perf->bwVariance = (N * sizeof(int)* 4 + N * sizeof(float)) / millisec / 1e6;
}
if (stuff == 2){
if (perf->millisecAvgCovariance == 0)
perf->millisecAvgCovariance = millisec;
else
perf->millisecAvgCovariance = (perf->millisecAvgCovariance + millisec) / 2;
perf->millisecTotCovariance = perf->millisecTotCovariance + millisec;
perf->bwCovariance = (N * sizeof(int)* 4 + N * sizeof(float)) / millisec / 1e6;
}
if (stuff == 3) {
if (perf->millisecAvgCorrelation == 0)
perf->millisecAvgCorrelation = millisec;
else
perf->millisecAvgCorrelation = (perf->millisecAvgCorrelation + millisec) / 2;
perf->millisecTotCorrelation = perf->millisecTotCorrelation + millisec;
perf->bwCorrelation = (N * sizeof(float)* 4) / millisec / 1e6;
}
}
/*
Function used to save the performance.
*/
void savePerformance(int gpuNumber, int window, gpuPerformance* perf, cpuPerformance* cpuPerf, int pixels){
std::string RES_FOLDER = "output/";
std::string fileName = "N_" + std::to_string(pixels) + "_W_" + std::to_string(window) + "_performance.txt";
/// Open the performance file.
std::ofstream oFile(RES_FOLDER + fileName);
for (int i = 0; i < gpuNumber; i++){
oFile << "Device " + std::to_string(i) + " Avg Variance Time," + std::to_string(perf[i].millisecAvgVariance) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Variance Time," + std::to_string(perf[i].millisecTotVariance) << "\n";
oFile << "Device " + std::to_string(i) + " Avg Covariance Time," + std::to_string(perf[i].millisecAvgCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Covariance Time," + std::to_string(perf[i].millisecTotCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " Avg Correlation Time," + std::to_string(perf[i].millisecAvgCorrelation) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Correlation Time," + std::to_string(perf[i].millisecTotCorrelation) << "\n";
oFile << "Device " + std::to_string(i) + " BW Variance," + std::to_string(perf[i].bwVariance) << "\n";
oFile << "Device " + std::to_string(i) + " BW Covariance," + std::to_string(perf[i].bwCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " BW Correlation," + std::to_string(perf[i].bwCorrelation) << "\n";
}
oFile << "CPU Tot Execution Time," + std::to_string((cpuPerf->exeTime) * 1000) << "\n";
oFile << "CPU Tot Saving Time," + std::to_string((cpuPerf->exeSaving) * 1000) << "\n";
oFile << "CPU Tot Waiting Time," + std::to_string((cpuPerf->waitingTime) * 1000) << "\n";
/// Close the file.
oFile.close();
}
| f4338b9af755e9ababe65a4890df86ad99748a49.cu | /*
*
* Program: Pearson Correlatrion Coefficient computation.
* Author: Andrea Purgato
* Version: integer input version.
*
* File: DeviceReader.cu
* Description: this file support the program with some functions that are related to the performance of the GPU devices.
*
*/
#pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "Logger.cpp"
/*
Struct hat contains the relevant GPU info.
*/
struct device{
char *name;
size_t globalMem;
int warpSize;
int maxThreadPerBlock;
int maxBlockSize[3];
int maxGridSize[3];
int cuncurrentKernels;
int registerPerBlock;
};
/*
Struct used to store the performance.
*/
struct gpuPerformance{
gpuPerformance() : millisecAvgVariance(0), millisecAvgCovariance(0), millisecAvgCorrelation(0), millisecTotVariance(0), millisecTotCovariance(0), millisecTotCorrelation(0){};
float millisecAvgVariance;
float millisecAvgCovariance;
float millisecAvgCorrelation;
float millisecTotVariance;
float millisecTotCovariance;
float millisecTotCorrelation;
float bwVariance;
float bwCovariance;
float bwCorrelation;
};
struct cpuPerformance{
cpuPerformance() : exeTime(0), exeSaving(0), waitingTime(0){};
int exeTime;
int exeSaving;
int waitingTime;
};
/*
Function that get back the number of devices.
*/
int getDeviceNumber() {
/// Query the number of devices.
int d;
cudaGetDeviceCount(&d);
return d;
}
/*
Function that create the arrays of gpus properties.
*/
device* getDeviceProp() {
int n = getDeviceNumber();
device *d = (device*)malloc(n * sizeof(device));
for (int i = 0; i < n; i++) {
/// Query the properties.
cudaDeviceProp p;
cudaGetDeviceProperties(&p, i);
d[i].name = p.name;
d[i].globalMem = p.totalGlobalMem;
d[i].maxThreadPerBlock = p.maxThreadsPerBlock;
d[i].maxBlockSize[0] = p.maxThreadsDim[0];
d[i].maxBlockSize[1] = p.maxThreadsDim[1];
d[i].maxBlockSize[2] = p.maxThreadsDim[2];
d[i].maxGridSize[0] = p.maxGridSize[0];
d[i].maxGridSize[1] = p.maxGridSize[1];
d[i].maxGridSize[2] = p.maxGridSize[2];
d[i].cuncurrentKernels = p.concurrentKernels;
d[i].registerPerBlock = p.regsPerBlock;
d[i].warpSize = p.warpSize;
log("Device " + std::to_string(i) + " " + p.name);
//log("Device memory:" + std::to_string(p.totalGlobalMem / (1024 * 1024)) + " GB");
//log("Warp size: " + std::to_string(p.warpSize));
//log("Register per block: " + std::to_string(p.regsPerBlock));
//log("Register per multiprocesssor: " + std::to_string(p.regsPerMultiprocessor));
}
return d;
}
/*
Function that update the performance measure.
*/
void updatePerformance(gpuPerformance* perf, float millisec, int N, int stuff){
if (stuff == 1){
if (perf->millisecAvgVariance == 0)
perf->millisecAvgVariance = millisec;
else
perf->millisecAvgVariance = (perf->millisecAvgCovariance + millisec) / 2;
perf->millisecTotVariance = perf->millisecTotVariance + millisec;
perf->bwVariance = (N * sizeof(int)* 4 + N * sizeof(float)) / millisec / 1e6;
}
if (stuff == 2){
if (perf->millisecAvgCovariance == 0)
perf->millisecAvgCovariance = millisec;
else
perf->millisecAvgCovariance = (perf->millisecAvgCovariance + millisec) / 2;
perf->millisecTotCovariance = perf->millisecTotCovariance + millisec;
perf->bwCovariance = (N * sizeof(int)* 4 + N * sizeof(float)) / millisec / 1e6;
}
if (stuff == 3) {
if (perf->millisecAvgCorrelation == 0)
perf->millisecAvgCorrelation = millisec;
else
perf->millisecAvgCorrelation = (perf->millisecAvgCorrelation + millisec) / 2;
perf->millisecTotCorrelation = perf->millisecTotCorrelation + millisec;
perf->bwCorrelation = (N * sizeof(float)* 4) / millisec / 1e6;
}
}
/*
Function used to save the performance.
*/
void savePerformance(int gpuNumber, int window, gpuPerformance* perf, cpuPerformance* cpuPerf, int pixels){
std::string RES_FOLDER = "output/";
std::string fileName = "N_" + std::to_string(pixels) + "_W_" + std::to_string(window) + "_performance.txt";
/// Open the performance file.
std::ofstream oFile(RES_FOLDER + fileName);
for (int i = 0; i < gpuNumber; i++){
oFile << "Device " + std::to_string(i) + " Avg Variance Time," + std::to_string(perf[i].millisecAvgVariance) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Variance Time," + std::to_string(perf[i].millisecTotVariance) << "\n";
oFile << "Device " + std::to_string(i) + " Avg Covariance Time," + std::to_string(perf[i].millisecAvgCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Covariance Time," + std::to_string(perf[i].millisecTotCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " Avg Correlation Time," + std::to_string(perf[i].millisecAvgCorrelation) << "\n";
oFile << "Device " + std::to_string(i) + " Tot Correlation Time," + std::to_string(perf[i].millisecTotCorrelation) << "\n";
oFile << "Device " + std::to_string(i) + " BW Variance," + std::to_string(perf[i].bwVariance) << "\n";
oFile << "Device " + std::to_string(i) + " BW Covariance," + std::to_string(perf[i].bwCovariance) << "\n";
oFile << "Device " + std::to_string(i) + " BW Correlation," + std::to_string(perf[i].bwCorrelation) << "\n";
}
oFile << "CPU Tot Execution Time," + std::to_string((cpuPerf->exeTime) * 1000) << "\n";
oFile << "CPU Tot Saving Time," + std::to_string((cpuPerf->exeSaving) * 1000) << "\n";
oFile << "CPU Tot Waiting Time," + std::to_string((cpuPerf->waitingTime) * 1000) << "\n";
/// Close the file.
oFile.close();
}
|
2ce2341239f9f0a8d3a2dd30b69ee20833c2d7e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psrdada_cpp/meerkat/tools/feng_to_dada.cuh"
#include "psrdada_cpp/meerkat/constants.hpp"
#include "psrdada_cpp/cuda_utils.hpp"
#include "psrdada_cpp/common.hpp"
#include "hip/hip_complex.h"
#include <fstream>
#include <iomanip>
namespace psrdada_cpp {
namespace meerkat {
namespace tools {
namespace kernels {
/*
__global__ void feng_heaps_to_dada(
int* __restrict__ in, int* __restrict__ out,
int nchans)
{
//This kernel only works with 256 threads
//Nchans must be a multiple of 32
__shared__ int transpose_buffer[8][32][32+1];
int const warp_idx = threadIdx.x >> 0x5;
int const lane_idx = threadIdx.x & 0x1f;
//blockIdx.x == timestamp
int offset = blockIdx.x * nchans * MEERKAT_FENG_NSAMPS_PER_HEAP;
//Treat the real and imaginary for both polarisations as
//a single integer. This means we are dealing with TFT data.
//Each warp does a 32 x 32 element transform
int chan_idx;
for (int channel_offset=0; channel_offset<nchans; channel_offset+=32)
{
int chan_offset = offset + channel_offset * MEERKAT_FENG_NSAMPS_PER_HEAP;
for (chan_idx=0; chan_idx<32; ++chan_idx)
{
transpose_buffer[warp_idx][chan_idx][lane_idx] = in[chan_offset + MEERKAT_FENG_NSAMPS_PER_HEAP * chan_idx + threadIdx.x];
}
int samp_offset = offset + nchans * warp_idx * 32 + channel_offset;
for (chan_idx=0; chan_idx<32; ++chan_idx)
{
out[samp_offset + lane_idx] = transpose_buffer[warp_idx][lane_idx][chan_idx];
}
}
}
*/
/*
 * Transpose one F-engine heap from channel-major to time-major order.
 * The index arithmetic below reads in[] as chan * MEERKAT_FENG_NSAMPS_PER_HEAP
 * + time and writes out[] as time * nchans + chan; each 32-bit word is one
 * sample. One block handles one heap (selected by blockIdx.x), staging a
 * 32x32 tile in shared memory: each warp writes one tile row and reads back
 * the transposed column.
 *
 * NOTE(review): the tile is written by warp_idx (0..blockDim.x/32-1) and read
 * back indexed by lane_idx (0..31), so full coverage appears to require
 * blockDim.x == 1024 (32 warps) and nchans a multiple of warpSize -- TODO
 * confirm against the launch site.
 * NOTE(review): transpose_buffer[32][32] has no padding column, so the
 * transposed read ([lane_idx][warp_idx]) is a stride-32 shared access;
 * [32][33] would avoid bank conflicts.
 */
__global__ void feng_heaps_to_dada(
int* __restrict__ in,
int* __restrict__ out,
int nchans)
{
__shared__ int transpose_buffer[32][32];
int const warp_idx = threadIdx.x >> 0x5;  // warp index within the block
int const lane_idx = threadIdx.x & 0x1f;  // lane index within the warp
// Word offset of this block's heap in both input and output.
int offset = blockIdx.x * nchans * MEERKAT_FENG_NSAMPS_PER_HEAP;
for (int time_idx=0; time_idx < MEERKAT_FENG_NSAMPS_PER_HEAP; time_idx+=warpSize)
{
int toffset = offset + (time_idx + lane_idx);           // input: lane selects the time sample
int coffset = offset + (time_idx + warp_idx) * nchans;  // output: warp selects the time row
for (int chan_idx = 0; chan_idx < nchans; chan_idx += warpSize)
{
int input_idx = (chan_idx + warp_idx) * MEERKAT_FENG_NSAMPS_PER_HEAP + toffset;
int output_idx = coffset + (chan_idx + lane_idx);
// Both loops have trip counts uniform across the block, so these
// barriers are never reached divergently.
__syncthreads();
transpose_buffer[warp_idx][lane_idx] = in[input_idx];
__syncthreads();
out[output_idx] = transpose_buffer[lane_idx][warp_idx];
}
}
}
} //kernels
} //tools
} //meerkat
} //psrdada_cpp | 2ce2341239f9f0a8d3a2dd30b69ee20833c2d7e4.cu | #include "psrdada_cpp/meerkat/tools/feng_to_dada.cuh"
#include "psrdada_cpp/meerkat/constants.hpp"
#include "psrdada_cpp/cuda_utils.hpp"
#include "psrdada_cpp/common.hpp"
#include "cuComplex.h"
#include <fstream>
#include <iomanip>
namespace psrdada_cpp {
namespace meerkat {
namespace tools {
namespace kernels {
/*
__global__ void feng_heaps_to_dada(
int* __restrict__ in, int* __restrict__ out,
int nchans)
{
//This kernel only works with 256 threads
//Nchans must be a multiple of 32
__shared__ int transpose_buffer[8][32][32+1];
int const warp_idx = threadIdx.x >> 0x5;
int const lane_idx = threadIdx.x & 0x1f;
//blockIdx.x == timestamp
int offset = blockIdx.x * nchans * MEERKAT_FENG_NSAMPS_PER_HEAP;
//Treat the real and imaginary for both polarisations as
//a single integer. This means we are dealing with TFT data.
//Each warp does a 32 x 32 element transform
int chan_idx;
for (int channel_offset=0; channel_offset<nchans; channel_offset+=32)
{
int chan_offset = offset + channel_offset * MEERKAT_FENG_NSAMPS_PER_HEAP;
for (chan_idx=0; chan_idx<32; ++chan_idx)
{
transpose_buffer[warp_idx][chan_idx][lane_idx] = in[chan_offset + MEERKAT_FENG_NSAMPS_PER_HEAP * chan_idx + threadIdx.x];
}
int samp_offset = offset + nchans * warp_idx * 32 + channel_offset;
for (chan_idx=0; chan_idx<32; ++chan_idx)
{
out[samp_offset + lane_idx] = transpose_buffer[warp_idx][lane_idx][chan_idx];
}
}
}
*/
/*
 * Transpose one F-engine heap from channel-major to time-major order.
 * The index arithmetic below reads in[] as chan * MEERKAT_FENG_NSAMPS_PER_HEAP
 * + time and writes out[] as time * nchans + chan; each 32-bit word is one
 * sample. One block handles one heap (selected by blockIdx.x), staging a
 * 32x32 tile in shared memory: each warp writes one tile row and reads back
 * the transposed column.
 *
 * NOTE(review): the tile is written by warp_idx (0..blockDim.x/32-1) and read
 * back indexed by lane_idx (0..31), so full coverage appears to require
 * blockDim.x == 1024 (32 warps) and nchans a multiple of warpSize -- TODO
 * confirm against the launch site.
 * NOTE(review): transpose_buffer[32][32] has no padding column, so the
 * transposed read ([lane_idx][warp_idx]) is a stride-32 shared access;
 * [32][33] would avoid bank conflicts.
 */
__global__ void feng_heaps_to_dada(
int* __restrict__ in,
int* __restrict__ out,
int nchans)
{
__shared__ int transpose_buffer[32][32];
int const warp_idx = threadIdx.x >> 0x5;  // warp index within the block
int const lane_idx = threadIdx.x & 0x1f;  // lane index within the warp
// Word offset of this block's heap in both input and output.
int offset = blockIdx.x * nchans * MEERKAT_FENG_NSAMPS_PER_HEAP;
for (int time_idx=0; time_idx < MEERKAT_FENG_NSAMPS_PER_HEAP; time_idx+=warpSize)
{
int toffset = offset + (time_idx + lane_idx);           // input: lane selects the time sample
int coffset = offset + (time_idx + warp_idx) * nchans;  // output: warp selects the time row
for (int chan_idx = 0; chan_idx < nchans; chan_idx += warpSize)
{
int input_idx = (chan_idx + warp_idx) * MEERKAT_FENG_NSAMPS_PER_HEAP + toffset;
int output_idx = coffset + (chan_idx + lane_idx);
// Both loops have trip counts uniform across the block, so these
// barriers are never reached divergently.
__syncthreads();
transpose_buffer[warp_idx][lane_idx] = in[input_idx];
__syncthreads();
out[output_idx] = transpose_buffer[lane_idx][warp_idx];
}
}
}
} //kernels
} //tools
} //meerkat
} //psrdada_cpp |
213c55ab434406a008a9a5eca1edadd7e41863e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "concatenate_rows_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
unsigned int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
concatenate_rows_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
concatenate_rows_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
concatenate_rows_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 213c55ab434406a008a9a5eca1edadd7e41863e2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "concatenate_rows_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark harness: for each matrix size (rows of matrices_) and each
    // launch configuration (rows of blocks_), time 1000 launches of
    // concatenate_rows_kernel and print "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " matrix_len" << std::endl;
        return 1;
    }
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has exactly 7 rows; clamp to stay in bounds.
    if (matrix_len > 7) {
        matrix_len = 7;
    }
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int size = XSIZE * YSIZE;
            // Allocate in BYTES: the original passed the raw element count to
            // cudaMalloc, under-allocating by sizeof(element).
            float *a = NULL;
            cudaMalloc(&a, size * sizeof(float));
            unsigned int *b = NULL;
            cudaMalloc(&b, size * sizeof(unsigned int));
            // Round the problem size up to a multiple of the block dims so the
            // grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before the timed region
            concatenate_rows_kernel<<<gridBlock, threadBlock>>>(a, b, size);
            cudaDeviceSynchronize();
            // Untimed warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                concatenate_rows_kernel<<<gridBlock, threadBlock>>>(a, b, size);
            }
            auto start = std::chrono::steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                concatenate_rows_kernel<<<gridBlock, threadBlock>>>(a, b, size);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // interval measures execution time, not just launch overhead.
            cudaDeviceSynchronize();
            auto end = std::chrono::steady_clock::now();
            auto usecs = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::microseconds::period> >(end - start);
            std::cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << std::endl;
            // Release the per-configuration buffers (the original leaked them).
            cudaFree(a);
            cudaFree(b);
        }
    }
}
2ff67afbf36c284277b0fdf77a46d37e3494af11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#define BLOCKSIZE 512
/* Evaluate sum_{i=0}^{degree} poly[i] * x^i in place, with x = array[gid];
 * one thread per array element, launched as a 1-D grid. Threads past n
 * return without touching memory. */
__global__ void polynomial_expansion(float *poly, int degree, int n, float *array)
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    float x = array[gid];   // evaluation point, cached in a register
    float acc = 0;          // running sum of terms
    float power = 1;        // x^term, updated incrementally
    for (int term = 0; term <= degree; ++term)
    {
        acc += power * poly[term];
        power *= x;
    }
    array[gid] = acc;
}
int main(int argc, char *argv[])
{
//TODO: add usage
if (argc < 3)
{
std::cerr << "usage: " << argv[0] << " n degree" << std::endl;
return -1;
}
char *ptr;
long long int n = strtol(argv[1],&ptr,10);
int degree = atoi(argv[2]);
int nbiter = 2;
float* array = NULL;
float* poly = NULL;
hipHostMalloc((void **)&array,sizeof(float)*(n/2));
hipHostMalloc((void **)&poly,sizeof(float)*(degree+1));
for (int i = 0; i < n/2; ++i)
array[i] = 1.;
for (int i = 0; i < (degree + 1); ++i)
poly[i] = 1.;
float *d_array, *d_poly;
hipMalloc((void **)&d_array, (n/2) * sizeof(float));
hipMalloc((void **)&d_poly, (degree + 1) * sizeof(float));
long long int size = n * sizeof(float) / 8;
/*hipStream_t stream[4];
for (int i = 0; i < 4; ++i){
hipStreamCreate(&stream[i]);
}*/
hipMemcpy(d_poly, poly, (degree + 1) * sizeof(float), hipMemcpyHostToDevice);
hipSetDevice(1);
float* array1 = NULL;
float* poly1 = NULL;
hipHostMalloc((void **)&array1,sizeof(float)*(n/2));
hipHostMalloc((void **)&poly1,sizeof(float)*(degree+1));
for (int i = 0; i < n/2; ++i)
array1[i] = 1.;
for (int i = 0; i < (degree + 1); ++i)
poly1[i] = 1.;
float *d_array1, *d_poly1;
hipMalloc((void **)&d_array1, (n/2) * sizeof(float));
hipMalloc((void **)&d_poly1, (degree + 1) * sizeof(float));
/*hipStream_t stream1[4];
for (int i = 0; i < 4; ++i){
hipStreamCreate(&stream1[i]);
}*/
hipMemcpy(d_poly1, poly1, (degree + 1) * sizeof(float), hipMemcpyHostToDevice);
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for(int k = 1; k <=nbiter; k++){
hipSetDevice(0);
hipStream_t stream[4];
for (int i = 0; i < 4; ++i){
hipStreamCreate(&stream[i]);
}
//for (int i = 0; i < 4; ++i) {
hipMemcpyAsync(d_array+ 0*size, array + 0*size,size, hipMemcpyHostToDevice, stream[0]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream[0], d_poly, degree, n/4, d_array + 0*size);
hipMemcpyAsync(array+ 0*size, d_array+ 0*size,size, hipMemcpyDeviceToHost, stream[0]);
hipMemcpyAsync(d_array+ 1*size, array + 1*size,size, hipMemcpyHostToDevice, stream[1]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream[1], d_poly, degree, n/4, d_array + 1*size);
hipMemcpyAsync(array+ 1*size, d_array+ 1*size,size, hipMemcpyDeviceToHost, stream[1]);
hipMemcpyAsync(d_array+ 2*size, array + 2*size,size, hipMemcpyHostToDevice, stream[2]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream[2], d_poly, degree, n/4, d_array + 2*size);
hipMemcpyAsync(array+ 2*size, d_array+ 2*size,size, hipMemcpyDeviceToHost, stream[2]);
hipMemcpyAsync(d_array+ 3*size, array + 3*size,size, hipMemcpyHostToDevice, stream[3]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream[3], d_poly, degree, n/4, d_array + 3*size);
hipMemcpyAsync(array+ 3*size, d_array+ 3*size,size, hipMemcpyDeviceToHost, stream[3]);
//}
hipSetDevice(1);
hipStream_t stream1[4];
for (int i = 0; i < 4; ++i){
hipStreamCreate(&stream1[i]);
}
hipMemcpyAsync(d_array1+ 0*size, array1 + 0*size,size, hipMemcpyHostToDevice, stream1[0]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream1[0], d_poly1, degree, n/4, d_array1 + 0*size);
hipMemcpyAsync(array1+ 0*size, d_array1+ 0*size,size, hipMemcpyDeviceToHost, stream1[0]);
hipMemcpyAsync(d_array1+ 1*size, array1 + 1*size,size, hipMemcpyHostToDevice, stream1[1]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream1[1], d_poly1, degree, n/4, d_array1 + 1*size);
hipMemcpyAsync(array1+ 1*size, d_array1+ 1*size,size, hipMemcpyDeviceToHost, stream1[1]);
hipMemcpyAsync(d_array1+ 2*size, array1 + 2*size,size, hipMemcpyHostToDevice, stream1[2]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream1[2], d_poly1, degree, n/4, d_array1 + 2*size);
hipMemcpyAsync(array1+ 2*size, d_array1+ 2*size,size, hipMemcpyDeviceToHost, stream1[2]);
hipMemcpyAsync(d_array1 + 3*size, array1 + 3*size,size, hipMemcpyHostToDevice, stream1[3]);
hipLaunchKernelGGL(( polynomial_expansion) , dim3(((n/4) + BLOCKSIZE - 1) / BLOCKSIZE), dim3(BLOCKSIZE), 0, stream1[3], d_poly1, degree, n/4, d_array1 + 3*size);
hipMemcpyAsync(array1 + 3*size, d_array1 + 3*size,size, hipMemcpyDeviceToHost, stream1[3]);
//}
hipStreamSynchronize(stream1[0]);
hipStreamSynchronize(stream1[1]);
hipStreamSynchronize(stream1[2]);
hipStreamSynchronize(stream1[3]);
for (int i = 0; i < 4; ++i){
hipStreamDestroy(stream1[i]);
}
hipSetDevice(0);
hipStreamSynchronize(stream[0]);
hipStreamSynchronize(stream[1]);
hipStreamSynchronize(stream[2]);
hipStreamSynchronize(stream[3]);
for (int i = 0; i < 4; ++i){
hipStreamDestroy(stream[i]);
}
}
hipDeviceSynchronize();
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end - begin);
/*hipSetDevice(0);
for (int i = 0; i < 4; ++i){
hipStreamDestroy(stream[i]);
}*/
hipSetDevice(0);
hipHostFree(array);
hipHostFree(poly);
hipFree(d_array);
hipFree(d_poly);
/*hipSetDevice(1);
for (int i = 0; i < 4; ++i){
hipStreamDestroy(stream1[i]);
}*/
hipSetDevice(1);
hipHostFree(array1);
hipHostFree(poly1);
hipFree(d_array1);
hipFree(d_poly1);
double pciBW = 1.50e+10, gpumemBW = 2.88e+11 , gpuflopRate = 1.73e+12 , pciLat = 8.80594e-06;
double HtD = double(((nbiter*(n/2))*(sizeof(float)))/pciBW);
//std::cout<<"HTD: "<<HtD<<std::endl;
double DtH = double(((nbiter*(n/2))*(sizeof(float)))/pciBW);
double dProc = ::max(double((3.0*(n/2)*(degree+1))/(gpuflopRate)),(double(sizeof(float)*((nbiter*(n/2))+degree+1))/(gpumemBW)));
//std::cout<<"dproc:"<<dProc<<" "<<double((3.0*(n/2)*(degree+1))/(gpuflopRate))<<" "<<(double(sizeof(float)*((nbiter*(n/2))+degree+1))/(gpumemBW))<<std::endl;
double ideal_time = ::max(dProc,(HtD+DtH));
std::cout << double(n*sizeof(float))<< " " << degree << " " << ideal_time << " " << totaltime.count() << " " << (double(n)/(ideal_time)) << " " << ((n)*nbiter)/totaltime.count() << std::endl;
return 0;
}
| 2ff67afbf36c284277b0fdf77a46d37e3494af11.cu | #include <iostream>
#include <chrono>
#define BLOCKSIZE 512
/* Evaluate the polynomial sum_{i=0}^{degree} poly[i] * x^i at x = array[index]
 * for every element, writing the result back into array in place.
 * Launch as a 1-D grid covering at least n threads; out-of-range threads do
 * nothing. */
__global__ void polynomial_expansion(float *poly, int degree, int n, float *array)
{
    // One thread per element of array.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
    {
        float temp = array[index];         // the evaluation point x
        float out = 0, xtothepowerof = 1;  // running sum and x^i
        for (int i = 0; i <= degree; i++)
        {
            out += xtothepowerof * poly[i];
            xtothepowerof *= temp;
        }
        array[index] = out;
    }
}
int main(int argc, char *argv[])
{
//TODO: add usage
if (argc < 3)
{
std::cerr << "usage: " << argv[0] << " n degree" << std::endl;
return -1;
}
char *ptr;
long long int n = strtol(argv[1],&ptr,10);
int degree = atoi(argv[2]);
int nbiter = 2;
float* array = NULL;
float* poly = NULL;
cudaMallocHost((void **)&array,sizeof(float)*(n/2));
cudaMallocHost((void **)&poly,sizeof(float)*(degree+1));
for (int i = 0; i < n/2; ++i)
array[i] = 1.;
for (int i = 0; i < (degree + 1); ++i)
poly[i] = 1.;
float *d_array, *d_poly;
cudaMalloc((void **)&d_array, (n/2) * sizeof(float));
cudaMalloc((void **)&d_poly, (degree + 1) * sizeof(float));
long long int size = n * sizeof(float) / 8;
/*cudaStream_t stream[4];
for (int i = 0; i < 4; ++i){
cudaStreamCreate(&stream[i]);
}*/
cudaMemcpy(d_poly, poly, (degree + 1) * sizeof(float), cudaMemcpyHostToDevice);
cudaSetDevice(1);
float* array1 = NULL;
float* poly1 = NULL;
cudaMallocHost((void **)&array1,sizeof(float)*(n/2));
cudaMallocHost((void **)&poly1,sizeof(float)*(degree+1));
for (int i = 0; i < n/2; ++i)
array1[i] = 1.;
for (int i = 0; i < (degree + 1); ++i)
poly1[i] = 1.;
float *d_array1, *d_poly1;
cudaMalloc((void **)&d_array1, (n/2) * sizeof(float));
cudaMalloc((void **)&d_poly1, (degree + 1) * sizeof(float));
/*cudaStream_t stream1[4];
for (int i = 0; i < 4; ++i){
cudaStreamCreate(&stream1[i]);
}*/
cudaMemcpy(d_poly1, poly1, (degree + 1) * sizeof(float), cudaMemcpyHostToDevice);
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for(int k = 1; k <=nbiter; k++){
cudaSetDevice(0);
cudaStream_t stream[4];
for (int i = 0; i < 4; ++i){
cudaStreamCreate(&stream[i]);
}
//for (int i = 0; i < 4; ++i) {
cudaMemcpyAsync(d_array+ 0*size, array + 0*size,size, cudaMemcpyHostToDevice, stream[0]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream[0]>>>(d_poly, degree, n/4, d_array + 0*size);
cudaMemcpyAsync(array+ 0*size, d_array+ 0*size,size, cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(d_array+ 1*size, array + 1*size,size, cudaMemcpyHostToDevice, stream[1]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream[1]>>>(d_poly, degree, n/4, d_array + 1*size);
cudaMemcpyAsync(array+ 1*size, d_array+ 1*size,size, cudaMemcpyDeviceToHost, stream[1]);
cudaMemcpyAsync(d_array+ 2*size, array + 2*size,size, cudaMemcpyHostToDevice, stream[2]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream[2]>>>(d_poly, degree, n/4, d_array + 2*size);
cudaMemcpyAsync(array+ 2*size, d_array+ 2*size,size, cudaMemcpyDeviceToHost, stream[2]);
cudaMemcpyAsync(d_array+ 3*size, array + 3*size,size, cudaMemcpyHostToDevice, stream[3]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream[3]>>>(d_poly, degree, n/4, d_array + 3*size);
cudaMemcpyAsync(array+ 3*size, d_array+ 3*size,size, cudaMemcpyDeviceToHost, stream[3]);
//}
cudaSetDevice(1);
cudaStream_t stream1[4];
for (int i = 0; i < 4; ++i){
cudaStreamCreate(&stream1[i]);
}
cudaMemcpyAsync(d_array1+ 0*size, array1 + 0*size,size, cudaMemcpyHostToDevice, stream1[0]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream1[0]>>>(d_poly1, degree, n/4, d_array1 + 0*size);
cudaMemcpyAsync(array1+ 0*size, d_array1+ 0*size,size, cudaMemcpyDeviceToHost, stream1[0]);
cudaMemcpyAsync(d_array1+ 1*size, array1 + 1*size,size, cudaMemcpyHostToDevice, stream1[1]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream1[1]>>>(d_poly1, degree, n/4, d_array1 + 1*size);
cudaMemcpyAsync(array1+ 1*size, d_array1+ 1*size,size, cudaMemcpyDeviceToHost, stream1[1]);
cudaMemcpyAsync(d_array1+ 2*size, array1 + 2*size,size, cudaMemcpyHostToDevice, stream1[2]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream1[2]>>>(d_poly1, degree, n/4, d_array1 + 2*size);
cudaMemcpyAsync(array1+ 2*size, d_array1+ 2*size,size, cudaMemcpyDeviceToHost, stream1[2]);
cudaMemcpyAsync(d_array1 + 3*size, array1 + 3*size,size, cudaMemcpyHostToDevice, stream1[3]);
polynomial_expansion <<<((n/4) + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream1[3]>>>(d_poly1, degree, n/4, d_array1 + 3*size);
cudaMemcpyAsync(array1 + 3*size, d_array1 + 3*size,size, cudaMemcpyDeviceToHost, stream1[3]);
//}
cudaStreamSynchronize(stream1[0]);
cudaStreamSynchronize(stream1[1]);
cudaStreamSynchronize(stream1[2]);
cudaStreamSynchronize(stream1[3]);
for (int i = 0; i < 4; ++i){
cudaStreamDestroy(stream1[i]);
}
cudaSetDevice(0);
cudaStreamSynchronize(stream[0]);
cudaStreamSynchronize(stream[1]);
cudaStreamSynchronize(stream[2]);
cudaStreamSynchronize(stream[3]);
for (int i = 0; i < 4; ++i){
cudaStreamDestroy(stream[i]);
}
}
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end - begin);
/*cudaSetDevice(0);
for (int i = 0; i < 4; ++i){
cudaStreamDestroy(stream[i]);
}*/
cudaSetDevice(0);
cudaFreeHost(array);
cudaFreeHost(poly);
cudaFree(d_array);
cudaFree(d_poly);
/*cudaSetDevice(1);
for (int i = 0; i < 4; ++i){
cudaStreamDestroy(stream1[i]);
}*/
cudaSetDevice(1);
cudaFreeHost(array1);
cudaFreeHost(poly1);
cudaFree(d_array1);
cudaFree(d_poly1);
double pciBW = 1.50e+10, gpumemBW = 2.88e+11 , gpuflopRate = 1.73e+12 , pciLat = 8.80594e-06;
double HtD = double(((nbiter*(n/2))*(sizeof(float)))/pciBW);
//std::cout<<"HTD: "<<HtD<<std::endl;
double DtH = double(((nbiter*(n/2))*(sizeof(float)))/pciBW);
double dProc = std::max(double((3.0*(n/2)*(degree+1))/(gpuflopRate)),(double(sizeof(float)*((nbiter*(n/2))+degree+1))/(gpumemBW)));
//std::cout<<"dproc:"<<dProc<<" "<<double((3.0*(n/2)*(degree+1))/(gpuflopRate))<<" "<<(double(sizeof(float)*((nbiter*(n/2))+degree+1))/(gpumemBW))<<std::endl;
double ideal_time = std::max(dProc,(HtD+DtH));
std::cout << double(n*sizeof(float))<< " " << degree << " " << ideal_time << " " << totaltime.count() << " " << (double(n)/(ideal_time)) << " " << ((n)*nbiter)/totaltime.count() << std::endl;
return 0;
}
|
9b44e87e22cfaa2364bf5971b1f2463ef1c25d41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
/* Report a failed HIP API call by printing its error string.
 * Execution continues afterwards -- the check is diagnostic, not fatal. */
void handleCudaError(hipError_t cudaERR){
    if (cudaERR == hipSuccess) {
        return;
    }
    printf("CUDA ERROR : %s\n", hipGetErrorString(cudaERR));
}
/* Evaluate sum_{x=0}^{degree} poly[x] * X^x in place for every element, with
 * X = array[index]; one thread per element, launched as a 1-D grid.
 * NOTE(review): array[index] is re-read from global memory on every loop
 * iteration; caching it in a local register before the loop would cut the
 * global loads from degree+1 to 1 per thread. */
__global__ void polynomial_expansion (float* poly, int degree,
                int n, float* array) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if( index < n )
  {
    float out = 0.0;           // running sum of terms
    float xtothepowerof = 1.0; // X^x, updated incrementally
    for ( int x = 0; x <= degree; ++x)
    {
      out += xtothepowerof * poly[x];
      xtothepowerof *= array[index];
    }
    array[index] = out;
  }
}
int main (int argc, char* argv[])
{
  // Usage: <prog> n degree
  //   n      -- number of evaluation points (array length)
  //   degree -- polynomial degree (poly holds degree+1 coefficients)
  if (argc < 3)
  {
    std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
    return -1;
  }
  int n = atoi(argv[1]);
  int degree = atoi(argv[2]);
  // atoi silently returns 0 on malformed input; reject sizes that would make
  // the allocations below invalid.
  if (n <= 0 || degree < 0)
  {
    std::cerr<<"usage: "<<argv[0]<<" n degree (n > 0, degree >= 0)"<<std::endl;
    return -1;
  }
  int nbiter = 1;
  float* array = new float[n];
  float* poly = new float[degree+1];
  for (int i=0; i<n; ++i){
    array[i] = 1.;
  }
  for (int i=0; i<degree+1; ++i){
    poly[i] = 1.;
  }
  float *d_array, *d_poly;
  std::chrono::time_point<std::chrono::system_clock> begin, end;
  begin = std::chrono::system_clock::now();
  handleCudaError(hipMalloc(&d_array, n*sizeof(float)));
  handleCudaError(hipMalloc(&d_poly, (degree+1)*sizeof(float)));
  // Check the copies as well (the original ignored their return codes).
  handleCudaError(hipMemcpy(d_array, array, n*sizeof(float), hipMemcpyHostToDevice));
  handleCudaError(hipMemcpy(d_poly, poly, (degree+1)*sizeof(float), hipMemcpyHostToDevice));
  hipLaunchKernelGGL(( polynomial_expansion), dim3((n+255)/256), dim3(256), 0, 0, d_poly, degree, n, d_array);
  // The blocking D2H copy also waits for the kernel; check its status too.
  handleCudaError(hipMemcpy(array, d_array, n*sizeof(float), hipMemcpyDeviceToHost));
  // Synchronize BEFORE releasing device memory; the original synchronized
  // after hipFree, when nothing was left in flight.
  handleCudaError(hipDeviceSynchronize());
  handleCudaError(hipFree(d_array));
  handleCudaError(hipFree(d_poly));
  {
    // With every input and coefficient set to 1, each output must be degree+1.
    bool correct = true;
    int ind = 0;  // initialized: only meaningful when correct == false
    for (int i=0; i< n; ++i) {
      if (fabs(array[i]-(degree+1))>0.01) {
        correct = false;
        ind = i;
      }
    }
    if (!correct)
      std::cerr<<"Result is incorrect. In particular array["<<ind<<"] should be "<<degree+1<<" not "<< array[ind]<<std::endl;
  }
  end = std::chrono::system_clock::now();
  std::chrono::duration<double> totaltime = (end-begin)/nbiter;
  std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
  delete[] array;
  delete[] poly;
  return 0;
}
| 9b44e87e22cfaa2364bf5971b1f2463ef1c25d41.cu | #include <iostream>
#include <chrono>
// Print a diagnostic (the API error string) when a CUDA call failed.
// Execution continues afterwards -- this helper reports but does not abort.
void handleCudaError(cudaError_t cudaERR){ //error handling
if (cudaERR!=cudaSuccess){
printf("CUDA ERROR : %s\n", cudaGetErrorString(cudaERR));
}
}
/* Evaluate sum_{k=0}^{degree} poly[k] * x^k in place, with x = array[tid];
 * one thread per array element, extra threads exit immediately. */
__global__ void polynomial_expansion (float* poly, int degree,
                int n, float* array) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  float acc = 0.0;    // running sum of terms
  float power = 1.0;  // x^k, updated incrementally
  for (int k = 0; k <= degree; ++k) {
    acc += power * poly[k];
    power *= array[tid];  // x is re-read from global memory each iteration
  }
  array[tid] = acc;
}
int main (int argc, char* argv[])
{
if (argc < 3)
{
std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
return -1;
}
int n = atoi(argv[1]);
int degree = atoi(argv[2]);
int nbiter = 1;
float* array = new float[n];
float* poly = new float[degree+1];
for (int i=0; i<n; ++i){
array[i] = 1.;
}
for (int i=0; i<degree+1; ++i){
poly[i] = 1.;
}
float *d_array, *d_poly;
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
handleCudaError(cudaMalloc(&d_array, n*sizeof(float)));
handleCudaError(cudaMalloc(&d_poly, (degree+1)*sizeof(float)));
cudaMemcpy(d_array, array, n*sizeof(float), cudaMemcpyHostToDevice); //memory allocation
cudaMemcpy(d_poly, poly,(degree+1)*sizeof(float), cudaMemcpyHostToDevice);
polynomial_expansion<<<(n+255)/256, 256>>>(d_poly, degree, n, d_array);
cudaMemcpy(array, d_array, n*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_array);
cudaFree(d_poly);
cudaDeviceSynchronize();
{
bool correct = true;
int ind;
for (int i=0; i< n; ++i) {
if (fabs(array[i]-(degree+1))>0.01) {
correct = false;
ind = i;
}
}
if (!correct)
std::cerr<<"Result is incorrect. In particular array["<<ind<<"] should be "<<degree+1<<" not "<< array[ind]<<std::endl;
}
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end-begin)/nbiter;
std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
delete[] array;
delete[] poly;
return 0;
}
|
00fe7bc1414f70b4134b2badf9f818523f958d8c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2021 RICOS Co. Ltd.
*
* This file is a part of ricosjp/monolish,
* and distributed under Apache-2.0 License
* https://github.com/ricosjp/monolish
*/
#include "hip/hip_runtime.h"
#include <iostream>
// Print the compute capability of the GPU selected by argv[1] as two
// concatenated digits (major then minor, e.g. "70").
// Exit status: 0 on success, 1 on usage/range errors, otherwise the HIP
// error code of the failing API call.
int main(int argc, char **argv) {
  if (argc != 2) {
    std::cout << "Usage: " << argv[0] << " [device number]" << std::endl;
    return 1;
  }
  // NOTE(review): std::stoi throws on non-numeric input; there is no
  // try/catch here -- confirm callers pass a validated argument.
  int device_number = std::stoi(argv[1]);
  hipError_t cudaStatus;
  int count;
  cudaStatus = hipGetDeviceCount(&count);
  if (cudaStatus != hipSuccess) {
    std::cerr << "CUDA API hipGetDeviceCount failed" << cudaStatus << std::endl;
    return cudaStatus;
  }
  // Reject device numbers past the end of the enumeration.
  // NOTE(review): negative device_number is not rejected here -- presumably
  // hipGetDeviceProperties reports it; verify.
  if (device_number >= count) {
    std::cerr << "Input device_number is larger than the number of GPU ("
              << device_number << " >= " << count << ")" << std::endl;
    return 1;
  }
  hipDeviceProp_t prop;
  cudaStatus = hipGetDeviceProperties(&prop, device_number);
  if (cudaStatus != hipSuccess) {
    std::cerr << "CUDA API hipGetDeviceProperties failed" << std::endl;
    return cudaStatus;
  }
  std::cout << prop.major << prop.minor << std::endl;
  return 0;
}
| 00fe7bc1414f70b4134b2badf9f818523f958d8c.cu | /**
* Copyright 2021 RICOS Co. Ltd.
*
* This file is a part of ricosjp/monolish,
* and distributed under Apache-2.0 License
* https://github.com/ricosjp/monolish
*/
#include "cuda_runtime.h"
#include <iostream>
// Print the compute capability of the GPU selected by argv[1] as two
// concatenated digits (major then minor, e.g. "70").
// Exit status: 0 on success, 1 on usage/range errors, otherwise the CUDA
// error code of the failing API call.
int main(int argc, char **argv) {
  if (argc != 2) {
    std::cout << "Usage: " << argv[0] << " [device number]" << std::endl;
    return 1;
  }
  const int device_number = std::stoi(argv[1]);

  int gpu_count = 0;
  cudaError_t status = cudaGetDeviceCount(&gpu_count);
  if (status != cudaSuccess) {
    std::cerr << "CUDA API cudaGetDeviceCount failed" << status << std::endl;
    return status;
  }
  if (device_number >= gpu_count) {
    std::cerr << "Input device_number is larger than the number of GPU ("
              << device_number << " >= " << gpu_count << ")" << std::endl;
    return 1;
  }

  cudaDeviceProp prop;
  status = cudaGetDeviceProperties(&prop, device_number);
  if (status != cudaSuccess) {
    std::cerr << "CUDA API cudaGetDeviceProperties failed" << std::endl;
    return status;
  }
  std::cout << prop.major << prop.minor << std::endl;
  return 0;
}
|
b894a89568b271552fa19d8a02f396725f43c8f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "config.h"
#include "queue.cuh"
#include "sort_hip.cuh"
#include <stdio.h>
#ifndef FEL_SIZE
#define FEL_SIZE 1
#endif
/* Map an arbitrary (possibly negative) index into [0, FEL_SIZE), wrapping it
 * around the end of the ring buffer, e.g. wrap(-1) == FEL_SIZE - 1. */
__device__ int wrap(int i)
{
    int m = i % FEL_SIZE;
    return (m >= 0) ? m : FEL_SIZE + m;
}
/* Finds the index of <item> in the range [start, end) of <ringbuffer> with
 * <capacity> slots. <start> is allowed to be larger than <end>, i.e. the range
 * may cross the end of the ring buffer. Comparison is done with operator==.
 * Returns -1 if the element is not found.
 */
__device__ int find_in_ringbuffer(queue_item *ringbuffer, int capacity, int start, int end, queue_item item) {
assert(start >= 0);
assert(end >= 0);
assert(capacity >= 0);
assert(ringbuffer != nullptr);
// Walk forward with wrap-around until the exclusive end index is reached.
for (int i = start; i != end; i = (i + 1) % capacity) {
if (ringbuffer[i] == item) {
return i;
}
}
return -1;
}
/* Search for the right insert position for <item> within [read, write) in the
 * ring buffer (i.e., fel[lp * FEL_SIZE]), comparing with operator>.
 * Starts scanning just before <read>; returns either the slot after the last
 * occupied one (append at the tail) or the index whose successor is the first
 * element greater than <item>.
 * NOTE(review): the `i + 1 == write` branch returns i + 1 without wrapping,
 * unlike the other paths -- confirm callers expect that. */
__device__ int search_ringbuffer(queue_item *ringbuffer, int read, int write, queue_item item)
{
int start = (read - 1 + FEL_SIZE) % FEL_SIZE;
int end = (write + 1) % FEL_SIZE;
int i;
for (i = start; i != end; i = (i + 1) % FEL_SIZE)
{
int next = (i + 1) % FEL_SIZE;
if (i + 1 == write) {
// Reached the last occupied slot: insert after it.
return i + 1;
} else if (ringbuffer[next] > item) {
// The successor is the first larger element: insert at i.
return i;
}
}
return i;
}
/* Exchange the queue items stored at indices x and y of data. */
static __device__ void swap(queue_item *data, int x, int y)
{
    queue_item held = data[y];
    data[y] = data[x];
    data[x] = held;
}
/* Partition data[left..right] around the middle element; left/right are
 * unwrapped logical indices and wrap() maps each access into the ring buffer.
 * The pivot is parked at `left`, the two scans meet, and the pivot is swapped
 * into its final slot; the pivot's unwrapped index is returned.
 * NOTE(review): parameter `lp` is unused in this body -- presumably a
 * leftover from a per-LP variant of wrap(); confirm before removing. */
static __device__ int partition(queue_item *data, int lp, int left, int right)
{
const int mid = left + (right - left) / 2;
const queue_item pivot = data[wrap(mid)];
// Move the pivot out of the way so the scans can cover [left+1, right].
swap(data, wrap(mid), wrap(left));
int i = left + 1;
int j = right;
while (i <= j) {
while (i <= j && data[wrap(i)] <= pivot) {
i++;
}
while (i <= j && data[wrap(j)] > pivot) {
j--;
}
if (i < j) {
swap(data, wrap(i), wrap(j));
}
}
// Place the pivot between the <=-pivot and >-pivot partitions.
swap(data, wrap(i - 1), wrap(left));
return i - 1;
}
/* Iterative quicksort over the ring-buffer range [left, right] (inclusive).
 * If the range crosses the end of the buffer (left > right), right is
 * unwrapped by adding FEL_SIZE and wrap() inside partition() handles the
 * modular indexing.
 * NOTE(review): ranges are pushed onto a fixed stack of QUICKSORT_STACK_SIZE
 * entries with no overflow check -- confirm the bound is safe for FEL_SIZE. */
__device__ void quicksort_seq(queue_item *data, int lp, int left, int right)
{
// Empty range: nothing to sort.
if(left == right)
return;
if (left > right) {
right = FEL_SIZE + right;
}
// Explicit work stack replaces recursion.
int stack_size = 0;
sort_data stack[QUICKSORT_STACK_SIZE];
stack[stack_size++] = {left, right};
while (stack_size > 0) {
int curr_left = stack[stack_size - 1].left;
int curr_right = stack[stack_size - 1].right;
stack_size--;
if (curr_left < curr_right) {
int part = partition(data, lp, curr_left, curr_right);
stack[stack_size++] = {curr_left, part - 1};
stack[stack_size++] = {part + 1, curr_right};
}
}
}
__device__ void insert_sorted_parallel(queue_item *data, int read, int write, queue_item item)
{
const unsigned int tIdx = threadIdx.x;
__shared__ bool finished;
if(!tIdx)
finished = false;
if(write < read)
write += FEL_SIZE;
write -= read;
int i;
int nBlockDim = -blockDim.x;
bool insert = false;
int insert_pos;
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
for(i = write - blockDim.x + 1; i > nBlockDim + 1; i -= blockDim.x)
#else
for(i = -1; i < write - 1; i += blockDim.x)
#endif
{
bool copy = false;
queue_item tmp = {-1, -1}; // must be smaller than any real queue_item
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx > 0)
tmp = data[wrap(read + i + tIdx - 1)];
#else
if(i + (int)tIdx < write - 1)
tmp = data[wrap(read + i + tIdx + 1)];
/* if(tmp.x != -1)
printf("checking %d/%d %.2f\n", tmp.x, tmp.y, __half2float(tmp.f)); */
#endif
__syncthreads();
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx > 0)
#else
if(i + (int)tIdx < write - 1)
#endif
{
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(tmp > item)
#else
if(tmp < item)
#endif
{
copy = true;
}
else
{
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx == write || data[wrap(read + i + tIdx)] > item)
#else
if(i + (int)tIdx == -1 || data[wrap(read + i + tIdx)] < item)
#endif
{
insert = true;
insert_pos = i + (int)tIdx;
}
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(tIdx == blockDim.x - 1)
#else
if(tIdx == 0)
#endif
finished = true;
}
if(copy)
{
data[wrap(read + i + tIdx)] = tmp;
}
}
__syncthreads(); // for element at block border and 'finished'
if(finished)
break;
}
#if !defined(LOCAL_ARRAY_QUEUE_BACKWARD)
if(!tIdx && (data[wrap(read + write - 1)] < item))
{
insert = true;
insert_pos = write - 1;
}
#endif
if(!tIdx && ((write == 0) || data[wrap(read + 0)] > item))
{
// printf("%d: c\n", tIdx + i);
insert = true;
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
insert_pos = 0;
#else
insert_pos = -1;
#endif
}
__syncthreads();
if(insert)
{
data[wrap(read + insert_pos)] = item;
}
}
/* Map an arbitrary (possibly negative) index into
 * [0, LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE), wrapping it around the end
 * of the per-thread insertion buffer. */
__device__ int insertion_buf_wrap(int i)
{
    int m = i % LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE;
    return (m >= 0) ? m : LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE + m;
}
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Insert <item> into the sorted ring segment [read, write) of a per-thread
 * insertion buffer. Elements smaller than <item> (per queue_item::operator>)
 * are shifted one slot towards read-1, growing the segment by one at the
 * front; the caller decrements its read index afterwards.
 * Fix: removed the dead pre-loop assignment to 'prev' — it was overwritten
 * at the top of every loop iteration before any use, and unused when the
 * loop body never ran. */
__device__ void insert_sorted_insertion_buf(queue_item *data, int read, int write, queue_item item)
{
    int start = read;
    int end = insertion_buf_wrap(write);
    if (read == write) {
        /* Empty segment: place the item in the slot just before read. */
        data[insertion_buf_wrap(read - 1)] = item;
        return;
    }
    for (int i = start; i != end; i = insertion_buf_wrap(i + 1)) {
        int prev = insertion_buf_wrap(i - 1);  /* slot freed by the shift */
        if (item > data[i]) {
            data[prev] = data[i];              /* shift smaller element forward */
        } else {
            data[prev] = item;                 /* found the insert position */
            return;
        }
    }
    /* item is larger than everything in the segment: it goes at the back. */
    data[insertion_buf_wrap(end - 1)] = item;
}
#ifdef LOCAL_ARRAY_QUEUE_SINGLE_PASS
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Merge <num_new> freshly enqueued items (stored at [write, write+num_new))
 * into the already-sorted range [read, write) in a single pass over the
 * queue. Each thread uses its own slice of a shared-memory insertion
 * buffer as a small sorted holding area.
 * NOTE(review): insertion_buf_write stays 0 and is only passed through at
 * the re-insert call below — confirm this matches the buffer's intended
 * [read, write) convention. */
__device__ void insert_sorted_single_pass(queue_item *data, int read, int write, int num_new)
{
if(num_new == 0)
return;
if(num_new > LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE)
{
printf("LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE exceeded\n");
return;
}
// One private slice per thread; uninitialized shared memory, written below.
__shared__ queue_item insertion_buf_[MAX_THREADS__LOCAL_ARRAY_QUEUE * LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE];
queue_item *insertion_buf = &insertion_buf_[threadIdx.x * LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE];
int insertion_buf_read = 0;
int insertion_buf_write = 0;
// Phase 1: sort the new items into the holding buffer; each insert grows
// the buffer one slot towards lower indices, so read is decremented.
for(int i = 0; i < num_new; i++)
{
queue_item item = data[wrap(write + i)];
insert_sorted_insertion_buf(&insertion_buf[0], insertion_buf_read, 0, item);
insertion_buf_read = insertion_buf_wrap(insertion_buf_read - 1);
}
int queue_elements = read <= write ? (write - read) : write - read + FEL_SIZE;
// Phase 2: one pass over the queue; whenever the smallest buffered item
// precedes the current queue element, swap it in and push the displaced
// queue element back into the holding buffer.
for(int i = 0; i < queue_elements + num_new; i++)
{
if(i >= queue_elements)
{
// Past the old tail: drain the remaining buffered items in order.
queue_item i_item = insertion_buf[insertion_buf_read];
data[wrap(read + i)] = i_item;
insertion_buf_read = insertion_buf_wrap(insertion_buf_read + 1);
}
else if(insertion_buf[insertion_buf_read] <= data[wrap(read + i)])
{
queue_item i_item = insertion_buf[insertion_buf_read];
insertion_buf_read = insertion_buf_wrap(insertion_buf_read + 1);
insert_sorted_insertion_buf(&insertion_buf[0], insertion_buf_read, insertion_buf_write, data[wrap(read + i)]);
insertion_buf_read = insertion_buf_wrap(insertion_buf_read - 1);
data[wrap(read + i)] = i_item;
}
}
}
#endif
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Sequential (single-thread) insertion of <item> into the sorted ring
 * buffer <data> over [read, write). The LOCAL_ARRAY_QUEUE_BACKWARD variant
 * scans from the write end shifting larger elements up; the default
 * variant scans from the read end shifting smaller elements down,
 * extending the occupied range by one slot at the front.
 * Fix: removed the local 'queue_elements' computed in both variants — it
 * was never read. */
__device__ void insert_sorted(queue_item *data, int read, int write, queue_item item)
{
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
    int start = wrap(write - 1);
    int end = wrap(read - 1);
    if (read == write || data[start] <= item) {
        /* Empty queue, or item is the new maximum: append at write. */
        data[write] = item;
        return;
    }
    for (int i = start; i != end; i = wrap(i - 1)) {
        if (data[i] > item) {
            int next = wrap(i + 1);
            data[next] = data[i];  /* shift larger element towards write */
        }
        int prev = wrap(i - 1);
        if (i == read || data[prev] <= item) {
            data[i] = item;
            return;
        }
    }
#else
    int start = wrap(read);
    int end = wrap(write);
    int final = wrap(write - 1);
    if (read == write || data[start] >= item) {
        /* Empty queue, or item is the new minimum: prepend before read. */
        data[wrap(start - 1)] = item;
        return;
    }
    for (int i = start; i != end; i = wrap(i + 1)) {
        if (data[i] < item) {
            int prev = wrap(i - 1);
            data[prev] = data[i];  /* shift smaller element towards read-1 */
        }
        int next = wrap(i + 1);
        if (i == final || data[next] >= item) {
            data[i] = item;
            return;
        }
    }
#endif
}
| b894a89568b271552fa19d8a02f396725f43c8f4.cu | #include "config.h"
#include "queue.cuh"
#include "sort.cuh"
#include <stdio.h>
#ifndef FEL_SIZE
#define FEL_SIZE 1
#endif
/* Wrap ring-buffer index <i> into [0, FEL_SIZE), mapping negative values
 * around the end (Euclidean modulo). For FEL_SIZE == 4:
 *   wrap(5)  == 1
 *   wrap(-1) == 3
 * (Fixed stale comment: it documented a two-parameter wrap(i, j) that no
 * longer exists — the divisor is the compile-time constant FEL_SIZE.) */
__device__ int wrap(int i)
{
    int result = i % FEL_SIZE;
    if (result >= 0) {
        return result;
    }
    return FEL_SIZE + result;
}
/* Finds the index of <item> in the range [start, end) of <ringbuffer> with <capacity>.
* <start> is allowed to be larger than <end>, i.e. the range are allowed to cross the end of the ringbuffer.
* Comparison is done with operator==. Returns -1 if the element is not found.
*/
/* Linear search for <item> in the range [start, end) of a ring buffer of
 * the given <capacity>; the range may cross the physical end of the
 * buffer. Matching uses queue_item::operator==. Returns the index of the
 * first match, or -1 when the range does not contain <item>. */
__device__ int find_in_ringbuffer(queue_item *ringbuffer, int capacity, int start, int end, queue_item item) {
    assert(start >= 0);
    assert(end >= 0);
    assert(capacity >= 0);
    assert(ringbuffer != nullptr);
    int idx = start;
    while (idx != end) {
        if (ringbuffer[idx] == item)
            return idx;
        idx = (idx + 1) % capacity;
    }
    return -1;
}
/* search for the right insert position for <item> within [read, write) in the
* ringbuffer (i.e., fel[lp * FEL_SIZE]), comparing with operator>. */
/* Scan from one slot before <read> towards <write>, returning the index
 * after which <item> should be inserted: the first position whose wrapped
 * successor compares greater than <item>, or the position just before
 * <write> when <item> is >= everything in range.
 * NOTE(review): the 'i + 1 == write' test compares an UNWRAPPED successor
 * against <write>; verify the intended behavior when the occupied range
 * wraps past index 0. */
__device__ int search_ringbuffer(queue_item *ringbuffer, int read, int write, queue_item item)
{
int start = (read - 1 + FEL_SIZE) % FEL_SIZE;
int end = (write + 1) % FEL_SIZE;
int i;
for (i = start; i != end; i = (i + 1) % FEL_SIZE)
{
int next = (i + 1) % FEL_SIZE;
if (i + 1 == write) {
return i + 1;
} else if (ringbuffer[next] > item) {
return i;
}
}
return i;
}
/* Exchange the elements at indices <x> and <y> of <data>. */
static __device__ void swap(queue_item *data, int x, int y)
{
    queue_item held = data[x];
    data[x] = data[y];
    data[y] = held;
}
/* Partition the logical range [left, right] of the ring buffer around the
 * middle element (moved to the left slot as pivot). Indices are logical
 * (may exceed FEL_SIZE) and are wrapped on every access. Returns the
 * pivot's final logical position.
 * NOTE(review): parameter 'lp' is unused here; it is kept because
 * quicksort_seq passes it through — confirm whether it can be dropped
 * from both signatures. */
static __device__ int partition(queue_item *data, int lp, int left, int right)
{
const int mid = left + (right - left) / 2;
const queue_item pivot = data[wrap(mid)];
swap(data, wrap(mid), wrap(left));
// Classic two-pointer pass: i walks over elements <= pivot from the left,
// j walks over elements > pivot from the right; out-of-place pairs swap.
int i = left + 1;
int j = right;
while (i <= j) {
while (i <= j && data[wrap(i)] <= pivot) {
i++;
}
while (i <= j && data[wrap(j)] > pivot) {
j--;
}
if (i < j) {
swap(data, wrap(i), wrap(j));
}
}
// Move the pivot from the left slot into its final sorted position.
swap(data, wrap(i - 1), wrap(left));
return i - 1;
}
/* Iterative (explicit-stack) quicksort of the logical range [left, right]
 * of the ring buffer. A wrapped range (left > right) is unwrapped by
 * extending right past FEL_SIZE; partition() wraps indices on access.
 * NOTE(review): pushes onto 'stack' without checking against
 * QUICKSORT_STACK_SIZE — overflow is possible for adversarial inputs. */
__device__ void quicksort_seq(queue_item *data, int lp, int left, int right)
{
if(left == right)
return;
if (left > right) {
right = FEL_SIZE + right;
}
int stack_size = 0;
sort_data stack[QUICKSORT_STACK_SIZE];
stack[stack_size++] = {left, right};
while (stack_size > 0) {
// Pop the next subrange to sort.
int curr_left = stack[stack_size - 1].left;
int curr_right = stack[stack_size - 1].right;
stack_size--;
if (curr_left < curr_right) {
int part = partition(data, lp, curr_left, curr_right);
// Push both halves; single-element/empty halves are filtered by the
// curr_left < curr_right check when popped.
stack[stack_size++] = {curr_left, part - 1};
stack[stack_size++] = {part + 1, curr_right};
}
}
}
/* Block-cooperative sorted insert of <item> into the ring buffer <data>
 * over the logical range [read, write). ALL threads of the block must call
 * this together (it contains __syncthreads barriers). Each loop pass moves
 * one blockDim.x-wide window of elements by one slot to open a gap; the
 * loop ends early once some thread has located the insert position.
 * The LOCAL_ARRAY_QUEUE_BACKWARD variant sweeps from the write end,
 * the default variant from the read end. */
__device__ void insert_sorted_parallel(queue_item *data, int read, int write, queue_item item)
{
const unsigned int tIdx = threadIdx.x;
__shared__ bool finished;
if(!tIdx)
finished = false;
if(write < read)
write += FEL_SIZE;
// From here on, 'write' holds the COUNT of occupied slots, not an index.
write -= read;
int i;
int nBlockDim = -blockDim.x;
bool insert = false;
int insert_pos;
// Sweep the occupied range one blockDim.x-wide window at a time.
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
for(i = write - blockDim.x + 1; i > nBlockDim + 1; i -= blockDim.x)
#else
for(i = -1; i < write - 1; i += blockDim.x)
#endif
{
bool copy = false;
queue_item tmp = {-1, -1}; // must be smaller than any real queue_item
// Stage the neighbour element in a register before the barrier so the
// shifted write below cannot race with another thread's read.
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx > 0)
tmp = data[wrap(read + i + tIdx - 1)];
#else
if(i + (int)tIdx < write - 1)
tmp = data[wrap(read + i + tIdx + 1)];
/* if(tmp.x != -1)
printf("checking %d/%d %.2f\n", tmp.x, tmp.y, __half2float(tmp.f)); */
#endif
__syncthreads();
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx > 0)
#else
if(i + (int)tIdx < write - 1)
#endif
{
// Neighbour still on the wrong side of <item>: keep shifting.
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(tmp > item)
#else
if(tmp < item)
#endif
{
copy = true;
}
else
{
// The gap stops moving here; this thread (at most one per call)
// remembers the slot where <item> belongs.
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(i + (int)tIdx == write || data[wrap(read + i + tIdx)] > item)
#else
if(i + (int)tIdx == -1 || data[wrap(read + i + tIdx)] < item)
#endif
{
insert = true;
insert_pos = i + (int)tIdx;
}
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
if(tIdx == blockDim.x - 1)
#else
if(tIdx == 0)
#endif
finished = true;
}
if(copy)
{
data[wrap(read + i + tIdx)] = tmp;
}
}
__syncthreads(); // for element at block border and 'finished'
if(finished)
break;
}
#if !defined(LOCAL_ARRAY_QUEUE_BACKWARD)
// item is larger than the current last element: insert at the back.
if(!tIdx && (data[wrap(read + write - 1)] < item))
{
insert = true;
insert_pos = write - 1;
}
#endif
// Empty queue, or item precedes the head element.
if(!tIdx && ((write == 0) || data[wrap(read + 0)] > item))
{
// printf("%d: c\n", tIdx + i);
insert = true;
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
insert_pos = 0;
#else
insert_pos = -1;
#endif
}
__syncthreads();
// Only the thread that found the slot writes the new item.
if(insert)
{
data[wrap(read + insert_pos)] = item;
}
}
/* Wrap index <i> into [0, LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE),
 * mapping negative C remainders back into range (Euclidean modulo). */
__device__ int insertion_buf_wrap(int i)
{
    const int rem = i % LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE;
    return (rem < 0) ? rem + LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE : rem;
}
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Insert <item> into the sorted ring segment [read, write) of a per-thread
 * insertion buffer. Elements smaller than <item> (per queue_item::operator>)
 * are shifted one slot towards read-1, growing the segment by one at the
 * front; the caller decrements its read index afterwards.
 * Fix: removed the dead pre-loop assignment to 'prev' — it was overwritten
 * at the top of every loop iteration before any use, and unused when the
 * loop body never ran. */
__device__ void insert_sorted_insertion_buf(queue_item *data, int read, int write, queue_item item)
{
    int start = read;
    int end = insertion_buf_wrap(write);
    if (read == write) {
        /* Empty segment: place the item in the slot just before read. */
        data[insertion_buf_wrap(read - 1)] = item;
        return;
    }
    for (int i = start; i != end; i = insertion_buf_wrap(i + 1)) {
        int prev = insertion_buf_wrap(i - 1);  /* slot freed by the shift */
        if (item > data[i]) {
            data[prev] = data[i];              /* shift smaller element forward */
        } else {
            data[prev] = item;                 /* found the insert position */
            return;
        }
    }
    /* item is larger than everything in the segment: it goes at the back. */
    data[insertion_buf_wrap(end - 1)] = item;
}
#ifdef LOCAL_ARRAY_QUEUE_SINGLE_PASS
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Merge <num_new> freshly enqueued items (stored at [write, write+num_new))
 * into the already-sorted range [read, write) in a single pass over the
 * queue. Each thread uses its own slice of a shared-memory insertion
 * buffer as a small sorted holding area.
 * NOTE(review): insertion_buf_write stays 0 and is only passed through at
 * the re-insert call below — confirm this matches the buffer's intended
 * [read, write) convention. */
__device__ void insert_sorted_single_pass(queue_item *data, int read, int write, int num_new)
{
if(num_new == 0)
return;
if(num_new > LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE)
{
printf("LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE exceeded\n");
return;
}
// One private slice per thread; uninitialized shared memory, written below.
__shared__ queue_item insertion_buf_[MAX_THREADS__LOCAL_ARRAY_QUEUE * LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE];
queue_item *insertion_buf = &insertion_buf_[threadIdx.x * LOCAL_ARRAY_QUEUE_SINGLE_PASS_MAX_ENQUEUE];
int insertion_buf_read = 0;
int insertion_buf_write = 0;
// Phase 1: sort the new items into the holding buffer; each insert grows
// the buffer one slot towards lower indices, so read is decremented.
for(int i = 0; i < num_new; i++)
{
queue_item item = data[wrap(write + i)];
insert_sorted_insertion_buf(&insertion_buf[0], insertion_buf_read, 0, item);
insertion_buf_read = insertion_buf_wrap(insertion_buf_read - 1);
}
int queue_elements = read <= write ? (write - read) : write - read + FEL_SIZE;
// Phase 2: one pass over the queue; whenever the smallest buffered item
// precedes the current queue element, swap it in and push the displaced
// queue element back into the holding buffer.
for(int i = 0; i < queue_elements + num_new; i++)
{
if(i >= queue_elements)
{
// Past the old tail: drain the remaining buffered items in order.
queue_item i_item = insertion_buf[insertion_buf_read];
data[wrap(read + i)] = i_item;
insertion_buf_read = insertion_buf_wrap(insertion_buf_read + 1);
}
else if(insertion_buf[insertion_buf_read] <= data[wrap(read + i)])
{
queue_item i_item = insertion_buf[insertion_buf_read];
insertion_buf_read = insertion_buf_wrap(insertion_buf_read + 1);
insert_sorted_insertion_buf(&insertion_buf[0], insertion_buf_read, insertion_buf_write, data[wrap(read + i)]);
insertion_buf_read = insertion_buf_wrap(insertion_buf_read - 1);
data[wrap(read + i)] = i_item;
}
}
}
#endif
/* perform an insertion sort with <ev> into <data> in the range [read, write) */
/* Sequential (single-thread) insertion of <item> into the sorted ring
 * buffer <data> over [read, write). The LOCAL_ARRAY_QUEUE_BACKWARD variant
 * scans from the write end shifting larger elements up; the default
 * variant scans from the read end shifting smaller elements down,
 * extending the occupied range by one slot at the front.
 * Fix: removed the local 'queue_elements' computed in both variants — it
 * was never read. */
__device__ void insert_sorted(queue_item *data, int read, int write, queue_item item)
{
#ifdef LOCAL_ARRAY_QUEUE_BACKWARD
    int start = wrap(write - 1);
    int end = wrap(read - 1);
    if (read == write || data[start] <= item) {
        /* Empty queue, or item is the new maximum: append at write. */
        data[write] = item;
        return;
    }
    for (int i = start; i != end; i = wrap(i - 1)) {
        if (data[i] > item) {
            int next = wrap(i + 1);
            data[next] = data[i];  /* shift larger element towards write */
        }
        int prev = wrap(i - 1);
        if (i == read || data[prev] <= item) {
            data[i] = item;
            return;
        }
    }
#else
    int start = wrap(read);
    int end = wrap(write);
    int final = wrap(write - 1);
    if (read == write || data[start] >= item) {
        /* Empty queue, or item is the new minimum: prepend before read. */
        data[wrap(start - 1)] = item;
        return;
    }
    for (int i = start; i != end; i = wrap(i + 1)) {
        if (data[i] < item) {
            int prev = wrap(i - 1);
            data[prev] = data[i];  /* shift smaller element towards read-1 */
        }
        int next = wrap(i + 1);
        if (i == final || data[next] >= item) {
            data[i] = item;
            return;
        }
    }
#endif
}
|
409dfba648a925dc1b0cf40872baae60ee368f86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include "commonFunctions.h" // generic malloc
#include "sumOptions.h" // options handling
/*
How to compile:
nvcc "filename" anyoption.cpp
*/
// sum array of integers sequentially (using cache)
// Sum <arraySize> ints of <arr> with a sequential, cache-friendly stride-1
// walk, repeated <numCycles> times for timing purposes; the total from the
// final pass is stored in *sumValue.
void arraySum(int *arr, int arraySize, int *sumValue, int numCycles) {
    int total = 0;
    for (int cycle = 0; cycle < numCycles; ++cycle) {
        total = 0;
        for (int idx = 0; idx < arraySize; ++idx)
            total += arr[idx];
    }
    *sumValue = total;
}
// sum array of integers non-sequentially (not using cache)
// Sum <arr> with a strided access pattern (stride = cacheLineSize) so that
// consecutive accesses land on different cache lines, defeating spatial
// locality; repeated <numCycles> times for timing. The total of the final
// pass is stored in *sumValue.
void arraySumStride(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
    int total = 0;
    for (int cycle = 0; cycle < numCycles; ++cycle) {
        total = 0;
        for (int offset = 0; offset < cacheLineSize; ++offset) {
            for (int pos = offset; pos < arraySize; pos += cacheLineSize)
                total += arr[pos];
        }
    }
    *sumValue = total;
}
// sum array on multiple threads on GPU
// Each thread sequentially sums its OWN array arr[index] (one array per
// thread) numCycles times, storing the final total in sumValue[index].
// The repetition exists purely for timing measurements.
// NOTE(review): no bounds guard — the launch must supply exactly one
// thread per entry of arr/sumValue, as main() does.
__global__ void arraySumGPU(int **arr, int arraySize, int *sumValue, int numCycles) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int tempSum = 0;
for (int k = 0; k < numCycles; k++) {
tempSum = 0;
for (int i = 0; i < arraySize; i++) {
tempSum += arr[index][i];
}
}
sumValue[index] = tempSum;
}
// sum array of integers non-sequentially (not using cache) on multiple threads on GPU
// Strided (cache-unfriendly) variant of arraySumGPU: each thread sums its
// own array arr[index] visiting elements cacheLineSize apart, repeated
// numCycles times; final total goes to sumValue[index].
// NOTE(review): no bounds guard — launch must match the number of arrays.
__global__ void arraySumStrideGPU(int **arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int tempSum = 0;
for (int k = 0; k < numCycles; k++) {
tempSum = 0;
for (int i = 0; i < cacheLineSize; i++) {
for (int j = i; j < arraySize; j += cacheLineSize) {
tempSum += arr[index][j];
}
}
}
sumValue[index] = tempSum;
}
// initialize array of arrays using multiple threads
// Fill every element of each per-thread array with 1.
// Indexes by threadIdx.x ONLY, so this is valid solely for single-block
// launches — main() launches it as <<<1, numThreadsTotal>>>.
__global__ void initializeArraysGPU(int **arrGPU, int arraySize) {
int index = threadIdx.x; // current thread ID
for (int i = 0; i < arraySize; i++) {
arrGPU[index][i] = 1;
}
}
/* Benchmark driver: times the sequential/strided array sum on the CPU and
 * on the GPU with progressively larger numBlocks x numThreads launch
 * configurations, until the GPU's average per-sum time beats the CPU's.
 * NOTE(review): maxNumBlocks is declared but never used; the outer loop
 * bound 'numBlocks <= maxNumThreads' looks like it was meant to be
 * maxNumBlocks (same value here, so behavior is unchanged) — confirm.
 * NOTE(review): no error checking on any HIP/CUDA API call or launch. */
int main(int argc, char *argv[])
{
int arraySize = 1 << 20; // 1M integers
int usesCache = 1; // 0: don't use cache, 1: use cache (default)
int cacheLineSize = 64 / sizeof(int); // # integers per cache line on CPU
int cacheLineSizeGPU = 128 / sizeof(int); // # integers per cache line on GPU
int numCycles = 1000; // default # of repetitions on CPU
int numCyclesGPU = 30; // default # of repetitions on GPU
int *arrCPU, *sumValueCPU, **arrGPU, *sumValuesGPU; // arrGPU: array of arrays
int numThreads = 1; // # of threads per block
int numBlocks = 1; // # of blocks
int numThreadsTotal; // # of threads running concurrently (= numThreads * numBlocks)
int maxNumThreads = 1024;
int maxNumBlocks= 1024;
hipDeviceProp_t prop;
int device = -1;
hipGetDevice(&device);
hipDeviceSynchronize();
hipGetDeviceProperties(&prop, device);
int numSM = prop.multiProcessorCount;
printf("\nCompare execution time on CPU and GPU\n");
printf("Default options: use cache, arraySize= %d integers.\n", arraySize);
//printf("-- Default number of repetitions: %d (CPU), %d (GPU).\n", numCycles, numCyclesGPU);
printf("-- number of multiprocessors= %d\n", numSM);
printf("-- run comparisons until GPU is faster than CPU or until all numBlocks-numThreads configurations with max %d threads have failed\n", maxNumThreads);
readOptions(argc, argv, &usesCache, &numCycles, &numCyclesGPU, &arraySize); // read options from command line and update values accordingly
// don't use --rep
// allocate memory for CPU execution
genericMalloc((void**)&arrCPU, arraySize * sizeof(int));
genericMalloc((void**)&sumValueCPU, sizeof(int));
// initialize array on CPU
for (int i = 0; i < arraySize; i++) {
arrCPU[i] = 1;
}
*sumValueCPU = 0;
// Time measurement
int minRunTime = 5; // elapsedTime must be at least minRunTime seconds
int minNumCyclesCPU = 1, minNumCyclesGPU = 1;
int elapsedClocks = 0, startClock = 0, endClock = 0;
double elapsedTimeCPU= 0.f, avgElapsedTimeCPU= 0.f, elapsedTimeGPU= 0.f, avgElapsedTimeGPU= 0.f;
avgElapsedTimeGPU = avgElapsedTimeCPU + 1; // run at least once
// run comparisons until GPU is faster than CPU or until all valid numBlocks-numThreads configurations have failed
// start with 1 block and 1 thread, then double numThreads. If maxNumThreads is reached, double numBlocks and restart.
while ((avgElapsedTimeCPU < avgElapsedTimeGPU) && (numBlocks <= maxNumThreads)) {
numThreadsTotal = numBlocks * numThreads;
printf("NUM BLOCKS= %d, NUM THREADS PER BLOCK = %d\n", numBlocks, numThreads);
// Measure CPU execution time
elapsedTimeCPU = 0.f;
avgElapsedTimeCPU = 0.f;
numCycles = minNumCyclesCPU;
// The CPU does numThreadsTotal * numCycles sums so per-sum averages are
// comparable with the GPU, where each of numThreadsTotal threads sums once.
while (elapsedTimeCPU < minRunTime) {// double numCycles until execution takes at least minRunTime
numCycles *= 2;
startClock = clock();
if (usesCache) {
arraySum(arrCPU, arraySize, sumValueCPU, numThreadsTotal * numCycles);
}
else {
arraySumStride(arrCPU, arraySize, sumValueCPU, numThreadsTotal * numCycles, cacheLineSize);
}
endClock = clock();
elapsedClocks = endClock - startClock;
elapsedTimeCPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
}
avgElapsedTimeCPU = elapsedTimeCPU / numCycles; // = avg time * numThreadsTotal
printf("CPU: Elapsed time= %fs. Average execution time= %fs.\n", elapsedTimeCPU, avgElapsedTimeCPU);
// Measure GPU execution time
// allocate memory for GPU execution
genericMalloc((void**)&arrGPU, numThreadsTotal * sizeof(int*));
genericMalloc((void**)&sumValuesGPU, numThreadsTotal * sizeof(int));
// allocate for each array copy
for (int i = 0; i < numThreadsTotal; i++) {
genericMalloc((void**)&arrGPU[i], arraySize * sizeof(int));
}
// initialize arrays on GPU
initializeArraysGPU << <1, numThreadsTotal >> > (arrGPU, arraySize);
hipDeviceSynchronize();
elapsedTimeGPU = 0.f;
avgElapsedTimeGPU = 0.f;
numCyclesGPU = minNumCyclesGPU;
while (elapsedTimeGPU < minRunTime) {// double numCyclesGPU until execution takes at least minRunTime
numCyclesGPU *= 2;
startClock = clock();
if (usesCache) {
arraySumGPU << <numBlocks, numThreads >> > (arrGPU, arraySize, sumValuesGPU, numCyclesGPU);
}
else {
arraySumStrideGPU << <numBlocks, numThreads >> > (arrGPU, arraySize, sumValuesGPU, numCyclesGPU, cacheLineSizeGPU);
}
hipDeviceSynchronize();
endClock = clock();
elapsedClocks = endClock - startClock;
elapsedTimeGPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
//printf("----elapsedGPUTIME: %f\n", elapsedTimeGPU);
}
avgElapsedTimeGPU = elapsedTimeGPU / numCyclesGPU;
printf("GPU: Elapsed time= %fs. Average execution time= %fs.\n\n", elapsedTimeGPU, avgElapsedTimeGPU);
// Free GPU memory
genericFree(sumValuesGPU);
for (int i = 0; i < numThreadsTotal; i++) {
genericFree(arrGPU[i]);
}
genericFree(arrGPU);
if (avgElapsedTimeCPU < avgElapsedTimeGPU) {
if (numThreadsTotal == maxNumThreads) {
// double numBlocks and reset numThreads
numBlocks *= 2;
numThreads = 1;
}
else {
// double numThreads
numThreads *= 2;
}
}
}
// print comparison results
if (avgElapsedTimeCPU >= avgElapsedTimeGPU) {
printf("GPU surpassed CPU when running %d blocks, %d threads\n. CPU time: %f, GPU time: %f.\n\n", numBlocks, numThreads, avgElapsedTimeCPU, avgElapsedTimeGPU);
}
else {
printf("CPU is still faster.\n\n");
}
// free allocated memory for CPU
genericFree(arrCPU);
genericFree(sumValueCPU);
return 0;
}
| 409dfba648a925dc1b0cf40872baae60ee368f86.cu | #include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include "commonFunctions.h" // generic malloc
#include "sumOptions.h" // options handling
/*
How to compile:
nvcc "filename" anyoption.cpp
*/
// sum array of integers sequentially (using cache)
// Sum <arraySize> ints of <arr> with a sequential, cache-friendly stride-1
// walk, repeated <numCycles> times for timing purposes; the total from the
// final pass is stored in *sumValue.
void arraySum(int *arr, int arraySize, int *sumValue, int numCycles) {
    int total = 0;
    for (int cycle = 0; cycle < numCycles; ++cycle) {
        total = 0;
        for (int idx = 0; idx < arraySize; ++idx)
            total += arr[idx];
    }
    *sumValue = total;
}
// sum array of integers non-sequentially (not using cache)
// Sum <arr> with a strided access pattern (stride = cacheLineSize) so that
// consecutive accesses land on different cache lines, defeating spatial
// locality; repeated <numCycles> times for timing. The total of the final
// pass is stored in *sumValue.
void arraySumStride(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
    int total = 0;
    for (int cycle = 0; cycle < numCycles; ++cycle) {
        total = 0;
        for (int offset = 0; offset < cacheLineSize; ++offset) {
            for (int pos = offset; pos < arraySize; pos += cacheLineSize)
                total += arr[pos];
        }
    }
    *sumValue = total;
}
// sum array on multiple threads on GPU
// Each thread sequentially sums its OWN array arr[index] (one array per
// thread) numCycles times, storing the final total in sumValue[index].
// The repetition exists purely for timing measurements.
// NOTE(review): no bounds guard — the launch must supply exactly one
// thread per entry of arr/sumValue, as main() does.
__global__ void arraySumGPU(int **arr, int arraySize, int *sumValue, int numCycles) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int tempSum = 0;
for (int k = 0; k < numCycles; k++) {
tempSum = 0;
for (int i = 0; i < arraySize; i++) {
tempSum += arr[index][i];
}
}
sumValue[index] = tempSum;
}
// sum array of integers non-sequentially (not using cache) on multiple threads on GPU
// Strided (cache-unfriendly) variant of arraySumGPU: each thread sums its
// own array arr[index] visiting elements cacheLineSize apart, repeated
// numCycles times; final total goes to sumValue[index].
// NOTE(review): no bounds guard — launch must match the number of arrays.
__global__ void arraySumStrideGPU(int **arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int tempSum = 0;
for (int k = 0; k < numCycles; k++) {
tempSum = 0;
for (int i = 0; i < cacheLineSize; i++) {
for (int j = i; j < arraySize; j += cacheLineSize) {
tempSum += arr[index][j];
}
}
}
sumValue[index] = tempSum;
}
// initialize array of arrays using multiple threads
// Fill every element of each per-thread array with 1.
// Indexes by threadIdx.x ONLY, so this is valid solely for single-block
// launches — main() launches it as <<<1, numThreadsTotal>>>.
__global__ void initializeArraysGPU(int **arrGPU, int arraySize) {
int index = threadIdx.x; // current thread ID
for (int i = 0; i < arraySize; i++) {
arrGPU[index][i] = 1;
}
}
/* Benchmark driver: times the sequential/strided array sum on the CPU and
 * on the GPU with progressively larger numBlocks x numThreads launch
 * configurations, until the GPU's average per-sum time beats the CPU's.
 * NOTE(review): maxNumBlocks is declared but never used; the outer loop
 * bound 'numBlocks <= maxNumThreads' looks like it was meant to be
 * maxNumBlocks (same value here, so behavior is unchanged) — confirm.
 * NOTE(review): no error checking on any CUDA API call or launch. */
int main(int argc, char *argv[])
{
int arraySize = 1 << 20; // 1M integers
int usesCache = 1; // 0: don't use cache, 1: use cache (default)
int cacheLineSize = 64 / sizeof(int); // # integers per cache line on CPU
int cacheLineSizeGPU = 128 / sizeof(int); // # integers per cache line on GPU
int numCycles = 1000; // default # of repetitions on CPU
int numCyclesGPU = 30; // default # of repetitions on GPU
int *arrCPU, *sumValueCPU, **arrGPU, *sumValuesGPU; // arrGPU: array of arrays
int numThreads = 1; // # of threads per block
int numBlocks = 1; // # of blocks
int numThreadsTotal; // # of threads running concurrently (= numThreads * numBlocks)
int maxNumThreads = 1024;
int maxNumBlocks= 1024;
cudaDeviceProp prop;
int device = -1;
cudaGetDevice(&device);
cudaDeviceSynchronize();
cudaGetDeviceProperties(&prop, device);
int numSM = prop.multiProcessorCount;
printf("\nCompare execution time on CPU and GPU\n");
printf("Default options: use cache, arraySize= %d integers.\n", arraySize);
//printf("-- Default number of repetitions: %d (CPU), %d (GPU).\n", numCycles, numCyclesGPU);
printf("-- number of multiprocessors= %d\n", numSM);
printf("-- run comparisons until GPU is faster than CPU or until all numBlocks-numThreads configurations with max %d threads have failed\n", maxNumThreads);
readOptions(argc, argv, &usesCache, &numCycles, &numCyclesGPU, &arraySize); // read options from command line and update values accordingly
// don't use --rep
// allocate memory for CPU execution
genericMalloc((void**)&arrCPU, arraySize * sizeof(int));
genericMalloc((void**)&sumValueCPU, sizeof(int));
// initialize array on CPU
for (int i = 0; i < arraySize; i++) {
arrCPU[i] = 1;
}
*sumValueCPU = 0;
// Time measurement
int minRunTime = 5; // elapsedTime must be at least minRunTime seconds
int minNumCyclesCPU = 1, minNumCyclesGPU = 1;
int elapsedClocks = 0, startClock = 0, endClock = 0;
double elapsedTimeCPU= 0.f, avgElapsedTimeCPU= 0.f, elapsedTimeGPU= 0.f, avgElapsedTimeGPU= 0.f;
avgElapsedTimeGPU = avgElapsedTimeCPU + 1; // run at least once
// run comparisons until GPU is faster than CPU or until all valid numBlocks-numThreads configurations have failed
// start with 1 block and 1 thread, then double numThreads. If maxNumThreads is reached, double numBlocks and restart.
while ((avgElapsedTimeCPU < avgElapsedTimeGPU) && (numBlocks <= maxNumThreads)) {
numThreadsTotal = numBlocks * numThreads;
printf("NUM BLOCKS= %d, NUM THREADS PER BLOCK = %d\n", numBlocks, numThreads);
// Measure CPU execution time
elapsedTimeCPU = 0.f;
avgElapsedTimeCPU = 0.f;
numCycles = minNumCyclesCPU;
// The CPU does numThreadsTotal * numCycles sums so per-sum averages are
// comparable with the GPU, where each of numThreadsTotal threads sums once.
while (elapsedTimeCPU < minRunTime) {// double numCycles until execution takes at least minRunTime
numCycles *= 2;
startClock = clock();
if (usesCache) {
arraySum(arrCPU, arraySize, sumValueCPU, numThreadsTotal * numCycles);
}
else {
arraySumStride(arrCPU, arraySize, sumValueCPU, numThreadsTotal * numCycles, cacheLineSize);
}
endClock = clock();
elapsedClocks = endClock - startClock;
elapsedTimeCPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
}
avgElapsedTimeCPU = elapsedTimeCPU / numCycles; // = avg time * numThreadsTotal
printf("CPU: Elapsed time= %fs. Average execution time= %fs.\n", elapsedTimeCPU, avgElapsedTimeCPU);
// Measure GPU execution time
// allocate memory for GPU execution
genericMalloc((void**)&arrGPU, numThreadsTotal * sizeof(int*));
genericMalloc((void**)&sumValuesGPU, numThreadsTotal * sizeof(int));
// allocate for each array copy
for (int i = 0; i < numThreadsTotal; i++) {
genericMalloc((void**)&arrGPU[i], arraySize * sizeof(int));
}
// initialize arrays on GPU
initializeArraysGPU << <1, numThreadsTotal >> > (arrGPU, arraySize);
cudaDeviceSynchronize();
elapsedTimeGPU = 0.f;
avgElapsedTimeGPU = 0.f;
numCyclesGPU = minNumCyclesGPU;
while (elapsedTimeGPU < minRunTime) {// double numCyclesGPU until execution takes at least minRunTime
numCyclesGPU *= 2;
startClock = clock();
if (usesCache) {
arraySumGPU << <numBlocks, numThreads >> > (arrGPU, arraySize, sumValuesGPU, numCyclesGPU);
}
else {
arraySumStrideGPU << <numBlocks, numThreads >> > (arrGPU, arraySize, sumValuesGPU, numCyclesGPU, cacheLineSizeGPU);
}
cudaDeviceSynchronize();
endClock = clock();
elapsedClocks = endClock - startClock;
elapsedTimeGPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
//printf("----elapsedGPUTIME: %f\n", elapsedTimeGPU);
}
avgElapsedTimeGPU = elapsedTimeGPU / numCyclesGPU;
printf("GPU: Elapsed time= %fs. Average execution time= %fs.\n\n", elapsedTimeGPU, avgElapsedTimeGPU);
// Free GPU memory
genericFree(sumValuesGPU);
for (int i = 0; i < numThreadsTotal; i++) {
genericFree(arrGPU[i]);
}
genericFree(arrGPU);
if (avgElapsedTimeCPU < avgElapsedTimeGPU) {
if (numThreadsTotal == maxNumThreads) {
// double numBlocks and reset numThreads
numBlocks *= 2;
numThreads = 1;
}
else {
// double numThreads
numThreads *= 2;
}
}
}
// print comparison results
if (avgElapsedTimeCPU >= avgElapsedTimeGPU) {
printf("GPU surpassed CPU when running %d blocks, %d threads\n. CPU time: %f, GPU time: %f.\n\n", numBlocks, numThreads, avgElapsedTimeCPU, avgElapsedTimeGPU);
}
else {
printf("CPU is still faster.\n\n");
}
// free allocated memory for CPU
genericFree(arrCPU);
genericFree(sumValueCPU);
return 0;
}
|
4844cbd3837332ec3e52d6124e27c27d574dc134.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
// Minimal kernel: every launched thread prints "Hello World" via device
// printf (output order across threads is unspecified).
__global__ void helloWorld() {
printf("Hello World\n");
}
// Launch helloWorld over a 16x4 logical thread layout arranged as 8x2
// thread blocks, then wait for completion and reset the device.
int main()
{
    const int nx = 16;
    const int ny = 4;
    dim3 block(8, 2, 1);
    dim3 grid(nx / block.x, ny / block.y, 1);
    helloWorld << <grid, block >> > ();
    hipDeviceSynchronize();
    hipDeviceReset();
    return 0;
}
#include <device_launch_parameters.h>
#include <stdio.h>
// Minimal kernel: every launched thread prints "Hello World" via device
// printf (output order across threads is unspecified).
__global__ void helloWorld() {
printf("Hello World\n");
}
// Launch helloWorld over a 16x4 logical thread layout arranged as 8x2
// thread blocks, then wait for completion and reset the device.
int main()
{
    const int nx = 16;
    const int ny = 4;
    dim3 block(8, 2, 1);
    dim3 grid(nx / block.x, ny / block.y, 1);
    helloWorld << <grid, block >> > ();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
240b78218cc11176f65693bff65ac46e0a6494a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
/* Per-boundary-edge residual contribution (OP2 user kernel).
 * x1,x2: coordinates of the edge's two nodes; q1: 4-component state of the
 * adjacent cell; adt1: timestep-like scale for the dissipation term;
 * res1: accumulated 4-component residual; bound: boundary type flag.
 * Uses the file-scope globals gm1, qinf (free-stream state) and eps.
 * For bound==1 only the pressure force along the edge normal is added;
 * otherwise a flux between q1 and qinf with dissipation mu is accumulated. */
__device__ void bres_calc_gpu( const float *x1, const float *x2, const float *q1,
const float *adt1, float *res1, const int *bound) {
float dx,dy,mu, ri, p1,vol1, p2,vol2, f;
// Edge vector; (dy, -dx) acts as the edge normal below.
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f/q1[0];
// Pressure from the conservative state: p = gm1*(E - 0.5*(u^2+v^2)/rho).
p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2]));
if (*bound==1) {
res1[1] += + p1*dy;
res1[2] += - p1*dx;
}
else {
vol1 = ri*(q1[1]*dy - q1[2]*dx);
// Same quantities for the free-stream state qinf.
ri = 1.0f/qinf[0];
p2 = gm1*(qinf[3]-0.5f*ri*(qinf[1]*qinf[1]+qinf[2]*qinf[2]));
vol2 = ri*(qinf[1]*dy - qinf[2]*dx);
mu = (*adt1)*eps;
// Averaged flux per component plus dissipation mu*(q1 - qinf).
f = 0.5f*(vol1* q1[0] + vol2* qinf[0] ) + mu*(q1[0]-qinf[0]);
res1[0] += f;
f = 0.5f*(vol1* q1[1] + p1*dy + vol2* qinf[1] + p2*dy) + mu*(q1[1]-qinf[1]);
res1[1] += f;
f = 0.5f*(vol1* q1[2] - p1*dx + vol2* qinf[2] - p2*dx) + mu*(q1[2]-qinf[2]);
res1[2] += f;
f = 0.5f*(vol1*(q1[3]+p1) + vol2*(qinf[3]+p2) ) + mu*(q1[3]-qinf[3]);
res1[3] += f;
}
}
// CUDA kernel function
__global__ void op_cuda_bres_calc(
const float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
float *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
const int *__restrict arg5,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg4_l[4];
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg3_size = ind_arg_sizes[0+blockId*1];
ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
//set shared memory pointers
int nbytes = 0;
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){
ind_arg3_s[n] = ZERO_float;
}
__syncthreads();
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg4_l[d] = ZERO_float;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
//user-supplied kernel call
bres_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg2+map2idx*1,
arg4_l,
arg5+(n+offset_b)*1);
col2 = colors[n+offset_b];
}
//store local variables
int arg4_map;
if (col2>=0) {
arg4_map = arg_map[0*set_size+n+offset_b];
}
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg4_l[0] += ind_arg3_s[0+arg4_map*4];
arg4_l[1] += ind_arg3_s[1+arg4_map*4];
arg4_l[2] += ind_arg3_s[2+arg4_map*4];
arg4_l[3] += ind_arg3_s[3+arg4_map*4];
ind_arg3_s[0+arg4_map*4] = arg4_l[0];
ind_arg3_s[1+arg4_map*4] = arg4_l[1];
ind_arg3_s[2+arg4_map*4] = arg4_l[2];
ind_arg3_s[3+arg4_map*4] = arg4_l[3];
}
__syncthreads();
}
}
for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
}
//host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg3.data_d,
(float *)arg4.data_d,
arg0.map_data_d,
arg2.map_data_d,
(int*)arg5.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
}
| 240b78218cc11176f65693bff65ac46e0a6494a7.cu | //
// auto-generated by op2.py
//
//user function
// Boundary-edge residual contribution (device helper).
// *bound == 1: presumably a solid wall - only the pressure term enters
//              the momentum residual.
// otherwise:   averages the interior and freestream (qinf) fluxes and adds
//              a dissipation term scaled by *adt1 - TODO confirm semantics.
// NOTE(review): gm1, qinf and eps are device symbols defined elsewhere in
// this translation unit; arithmetic kept in the original evaluation order.
__device__ void bres_calc_gpu( const float *x1, const float *x2, const float *q1,
const float *adt1, float *res1, const int *bound) {
  float edge_dx = x1[0] - x2[0];
  float edge_dy = x1[1] - x2[1];

  float inv_rho = 1.0f/q1[0];
  float p_in = gm1*(q1[3]-0.5f*inv_rho*(q1[1]*q1[1]+q1[2]*q1[2]));

  if (*bound==1) {
    // pressure force only
    res1[1] += + p_in*edge_dy;
    res1[2] += - p_in*edge_dx;
    return;
  }

  float vol_in = inv_rho*(q1[1]*edge_dy - q1[2]*edge_dx);

  inv_rho = 1.0f/qinf[0];
  float p_out = gm1*(qinf[3]-0.5f*inv_rho*(qinf[1]*qinf[1]+qinf[2]*qinf[2]));
  float vol_out = inv_rho*(qinf[1]*edge_dy - qinf[2]*edge_dx);

  float mu = (*adt1)*eps;

  float flux;
  flux = 0.5f*(vol_in* q1[0] + vol_out* qinf[0] ) + mu*(q1[0]-qinf[0]);
  res1[0] += flux;
  flux = 0.5f*(vol_in* q1[1] + p_in*edge_dy + vol_out* qinf[1] + p_out*edge_dy) + mu*(q1[1]-qinf[1]);
  res1[1] += flux;
  flux = 0.5f*(vol_in* q1[2] - p_in*edge_dx + vol_out* qinf[2] - p_out*edge_dx) + mu*(q1[2]-qinf[2]);
  res1[2] += flux;
  flux = 0.5f*(vol_in*(q1[3]+p_in) + vol_out*(qinf[3]+p_out) ) + mu*(q1[3]-qinf[3]);
  res1[3] += flux;
}
// CUDA kernel function
// CUDA kernel for the bres_calc loop (generated by op2.py).
// Each thread block processes one plan mini-block: increments to the
// indirectly updated dataset (ind_arg3) are staged in dynamic shared
// memory, applied one thread-colour at a time to avoid intra-block races,
// then flushed to global memory at the end.
__global__ void op_cuda_bres_calc(
const float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
float *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
const int *__restrict arg5,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg4_l[4];
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
// dynamic shared memory; host sizes it to Plan->nsharedCol[col]
extern __shared__ char shared[];
// grid is a flattened 1D block list laid over a 2D launch; drop overshoot
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
// nelem rounded up to a multiple of blockDim.x, so every thread joins
// the colour loop (keeps __syncthreads() non-divergent below)
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg3_size = ind_arg_sizes[0+blockId*1];
ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
//set shared memory pointers
int nbytes = 0;
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// zero the staging area (4 floats per indirectly-touched element)
for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){
ind_arg3_s[n] = ZERO_float;
}
__syncthreads();
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg4_l[d] = ZERO_float;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
//user-supplied kernel call
bres_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg2+map2idx*1,
arg4_l,
arg5+(n+offset_b)*1);
col2 = colors[n+offset_b];
}
//store local variables
int arg4_map;
if (col2>=0) {
arg4_map = arg_map[0*set_size+n+offset_b];
}
// elements of the same colour never share a target, so increments of
// one colour can be applied concurrently; barrier between colours.
// ncolor is block-uniform, so the __syncthreads() is non-divergent.
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg4_l[0] += ind_arg3_s[0+arg4_map*4];
arg4_l[1] += ind_arg3_s[1+arg4_map*4];
arg4_l[2] += ind_arg3_s[2+arg4_map*4];
arg4_l[3] += ind_arg3_s[3+arg4_map*4];
ind_arg3_s[0+arg4_map*4] = arg4_l[0];
ind_arg3_s[1+arg4_map*4] = arg4_l[1];
ind_arg3_s[2+arg4_map*4] = arg4_l[2];
ind_arg3_s[3+arg4_map*4] = arg4_l[3];
}
__syncthreads();
}
}
// flush staged increments back to global memory via the indirection map
for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
}
//host stub function
// OP2 host stub for the "bres_calc" indirect parallel loop (CUDA backend).
// Fetches a colouring plan so that racy indirect increments (inds 0..3) are
// serialised by colour, then launches op_cuda_bres_calc once per colour.
// NOTE(review): this file is auto-generated (op2.py); edits kept to comments.
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
// 4 distinct indirect datasets; -1 marks arg5 as directly accessed
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
// start MPI halo exchange; returns the iteration size for this rank
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
// core colours need no halo data; block on MPI before the rest
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
// fold block counts >= 2^16 into grid.y (65535 blocks per x-row)
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
// dynamic shared memory sized per colour for staged increments
int nshared = Plan->nsharedCol[col];
op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg3.data_d,
(float *)arg4.data_d,
arg0.map_data_d,
arg2.map_data_d,
(int*)arg5.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
// NOTE(review): no cudaGetLastError() after the launches; launch failures
// only surface at this synchronise - consider a per-launch check.
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
}
|
d756fbe90180466a316b0aeede9fe2425239ecdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Per-pixel RNG setup: one thread per pixel of a max_x x max_y image.
// Expects a 2D grid/block launch; rand_state must hold >= max_x*max_y
// entries. (Trailing "|" text on the closing line is dataset-table residue
// from the surrounding dump, left untouched.)
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
// guard the grid tail: the launch may overshoot the image extents
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
} | d756fbe90180466a316b0aeede9fe2425239ecdf.cu | #include "includes.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Initialise one cuRAND state per pixel of a max_x x max_y image.
// Expects a 2D grid/block launch; out-of-range threads do nothing.
// (Trailing "|" on the closing line is dataset-table residue, untouched.)
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < max_x && row < max_y) {
    int pixel_index = row * max_x + col;
    // identical seed for every thread, per-pixel sequence number, no offset
    curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
  }
} |
89c027661e751880aa0dada169a310df70448669.hip | // !!! This is a file automatically generated by hipify!!!
#include <UnitTest++.h>
#include <vector>
#include <cmath>
#ifdef __HIPCC__
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#endif
#include "CrossSectionHash.hh"
namespace Log2Hash_tester_namespace {
using namespace MonteRay;
// Hash policy under test: bin index = floor((log2(value)+40)*100), i.e.
// logarithmically spaced bins, 100 per octave. invHashFunction maps an
// index (plus the table's minIndex offset) back to a bin energy.
class testLog2Hash {
public:
CUDA_CALLABLE_MEMBER
static size_t hashFunction(const double value) {
// For double
// log2-spaced integer bin index (this is NOT a raw bit shift)
// NOTE(review): value == 0 passes the assert but log2(0) is -inf;
// callers are expected to stay within the table's positive range.
//std::cout << "Debug -- Calling hashFunction(double)\n";
MONTERAY_ASSERT_MSG( value >= 0.0, "Negative values are not allowed.");
std::uint64_t i = (( std::log2(value)+40.0)*100.0);
return i;
}
CUDA_CALLABLE_MEMBER
static size_t hashFunction(const float value) {
// For float
// log2-spaced integer bin index, evaluated in float arithmetic
//std::cout << "Debug -- Calling hashFunction(float)\n";
MONTERAY_ASSERT_MSG( value >= 0.0f, "Negative values are not allowed.");
std::uint64_t i = (( std::log2(value)+40.0f)*100.0f);
return i;
}
// double overload, selected when TV is 8 bytes wide
template < typename TV, typename std::enable_if< sizeof(TV) == 8 >::type* = nullptr >
CUDA_CALLABLE_MEMBER
static
TV invHashFunction( const size_t index, const size_t minIndex = 0 ) {
//std::cout << "Debug -- Calling invHashFunction(index)->double\n";
TV value = std::exp2( ((index + minIndex) / 100.0 ) - 40.0 );
return value;
}
// float overload, selected when TV is 4 bytes wide
template < typename TV, typename std::enable_if< sizeof(TV) == 4 >::type* = nullptr >
CUDA_CALLABLE_MEMBER
static
TV invHashFunction( const size_t index, const size_t minIndex = 0 ) {
//std::cout << "Debug -- Calling invHashFunction(index)->float\n";
TV value = std::exp2( ((index + minIndex) / 100.0f ) - 40.0f );
return value;
}
};
SUITE( Log2Hash_tester ) {
using doubleXSHash = CrossSectionHash_t<double, testLog2Hash>;
using floatXSHash = CrossSectionHash_t<float, testLog2Hash>;
typedef doubleXSHash::NuclearData_t DoubleNuclearData_t;
typedef std::vector<double> DoubleNuclearDataVec_t;
typedef doubleXSHash::Index_t DoubleIndex_t;
typedef floatXSHash::NuclearData_t FloatNuclearData_t;
typedef std::vector<float> FloatNuclearDataVec_t;
typedef floatXSHash::Index_t FloatIndex_t;
// Construction from a double-valued table: index range and table size are
// fixed by the hash policy, independent of the data handed in.
// (Removed unused locals `min`/`max` - their values were never checked.)
TEST( ctor_w_double ) {
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
// spot-check the hash formula at the table edges
CHECK_EQUAL( 345, size_t( (std::log2( 1e-11 ) + 40)*100 ) );
CHECK_EQUAL( 678, size_t( (std::log2( 1e-10 ) + 40)*100 ) );
CHECK_EQUAL( 4996, size_t( (std::log2( 1e+3 ) + 40)*100 ) );
CHECK_EQUAL( 4996-345 + 1, hash.size() );
CHECK_EQUAL( 4652, hash.size() );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
//printf( "Debug exp2( ( ( 0 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 0 + 345) / 100.0) - 40.0 ) );
CHECK_EQUAL( 9.939251007413245e-12 , testLog2Hash::invHashFunction<double>( 0, 345 ) );
CHECK_EQUAL( 9.939251007413245e-12 , hash.invHashFunction( 0 ) );
}
// Construction from a float-valued table.
// Bug fix: this test is named for the float instantiation but previously
// constructed a doubleXSHash; use floatXSHash (same fixed size, 4652 -
// matching the value asserted in getHashIndex_float).
TEST( ctor_w_float ) {
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
CHECK_EQUAL( 4652, hash.size() );
}
// Characterisation of the double instantiation: min/max bin values,
// hash/inverse-hash round-trips, and the fixed bytesize.
TEST( getHashIndex_double ) {
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
CHECK_EQUAL( 9.939251007413245e-12 , hash.getMinValue() );
CHECK_EQUAL( 345, doubleXSHash::hashFunction( hash.getMinValue()) );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 0, hash.getHashIndex( hash.getMinValue() ) );
CHECK_EQUAL( 9.939251007413245e-12, hash.invHashFunction( 0 ) );
//printf( "Debug exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) );
CHECK_EQUAL( 9.959986661501810e+02, hash.getMaxValue() );
CHECK_EQUAL( 4996, doubleXSHash::hashFunction( hash.getMaxValue()) );
CHECK_EQUAL( 4996, hash.getMaxIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
CHECK_EQUAL( 9.959986661501810e+02, hash.invHashFunction( hash.getNumIndices() -1 ));
// printf( "Debug: hash.invHashFunction( 1 ) = %20.15e \n", hash.invHashFunction( 1 ) );
// printf( "Debug: hash.invHashFunction( 2 ) = %20.15e \n", hash.invHashFunction( 2 ) );
// printf( "Debug: hash.invHashFunction( 3 ) = %20.15e \n", hash.invHashFunction( 3 ) );
CHECK_CLOSE( 1.000838396532159e-11, hash.invHashFunction( 1 ), 1e-26 );
CHECK_CLOSE( 1.007799778097923e-11, hash.invHashFunction( 2 ), 1e-26);
CHECK_CLOSE( 1.014809579901632e-11, hash.invHashFunction( 3 ), 1e-26 );
CHECK_EQUAL( 37304, hash.bytesize() );
}
// Characterisation of the float instantiation: same index range as the
// double version but float-rounded bin values and a smaller bytesize.
TEST( getHashIndex_float ) {
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
CHECK_EQUAL( 4652, hash.size() );
//printf( "Debug exp2( ( ( 0 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 0 + 345) / 100.0f) - 40.0f ) );
CHECK_CLOSE( 9.939256015445430e-12 , hash.getMinValue(), 1e-26 );
CHECK_EQUAL( 345, floatXSHash::hashFunction( hash.getMinValue()) );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 0, hash.getHashIndex( hash.getMinValue() ) );
CHECK_CLOSE( 9.939256015445430e-12 , hash.invHashFunction( 0 ), 1e-26 );
//printf( "Debug exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 4651 + 345) / 100.0f) - 40.0f ) );
CHECK_EQUAL( 9.959980468750000e+02, hash.getMaxValue() );
CHECK_EQUAL( 4996, floatXSHash::hashFunction( 1e+3 ) );
CHECK_EQUAL( 4996, floatXSHash::hashFunction( hash.getMaxValue()) );
CHECK_EQUAL( 4996, hash.getMaxIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
CHECK_EQUAL( 9.959980468750000e+02, hash.invHashFunction( hash.getNumIndices() -1 ));
//printf( "Debug: hash.invHashFunction( 0 ) = %20.15e \n", hash.invHashFunction( 0 ) );
//printf( "Debug: hash.invHashFunction( 1 ) = %20.15e \n", hash.invHashFunction( 1 ) );
//printf( "Debug: hash.invHashFunction( 2 ) = %20.15e \n", hash.invHashFunction( 2 ) );
//printf( "Debug: hash.invHashFunction( 3 ) = %20.15e \n", hash.invHashFunction( 3 ) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction( 1 ), 1e-26 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction( 2 ), 1e-26);
CHECK_CLOSE( 1.014809243582437e-11, hash.invHashFunction( 3 ), 1e-26 );
CHECK_EQUAL( 37288, hash.bytesize() );
}
// Every table bin must map back to a "normal" double (no zeros, denormals,
// infs or NaNs across the whole index range).
// (Test-name typo "eneries" kept: renaming would change the registered test.)
TEST( test_all_bin_eneries_are_valid_double ) {
using T = double;
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
for( unsigned i=0; i< hash.size(); ++i ){
T value = hash.invHashFunction( i );
CHECK_EQUAL( true, std::isnormal(value) );
}
}
// Float counterpart of the bin-validity sweep: every bin energy must be a
// normal float.
// (Test-name typo "eneries" kept: renaming would change the registered test.)
TEST( test_all_bin_eneries_are_valid_float ) {
using T = float;
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
for( unsigned i=0; i< hash.size(); ++i ){
T value = hash.invHashFunction( i );
CHECK_EQUAL( true, std::isnormal(value) );
}
}
// Lower-bin lookup for the double instantiation: getIndex(E) returns a
// (first,second) pair bracketing E in the data table, suitable for a
// std::upper_bound search (demonstrated mid-test).
TEST( testLowerBin_double ) {
using T = double;
// 0 1 2 3 4 5 6
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, 1.0, 1e6 };
doubleXSHash hash(testValues);
CHECK_EQUAL( 37304, hash.bytesize() );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 3, hash.getBinLo( 2 ) ); // 1.007800613794796e-11
CHECK_EQUAL( 3, hash.getBinLo( 3 ) ); // 1.014809243582437e-11
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).second );
T energy = 1e-13;
/* CHECK_EQUAL( 0, hash.getHashIndex(energy) ); // test failure in debug mode only */
CHECK_EQUAL( 9.939251007413245e-12, hash.getMinValue() );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).second );
energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 1, hash.getIndex( 1e-12 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-12 ).second);
CHECK_EQUAL( 1, hash.getIndex( 1e-11 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-11 ).second);
energy = 1.00091e-11;
CHECK_EQUAL( 1, hash.getHashIndex(energy) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction(1), 1e-15 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction(2), 1e-15 );
CHECK_EQUAL( 2, hash.getIndex( energy ).first );
CHECK_EQUAL( 4, hash.getIndex( energy ).second );
// simulate cross-section lookup using hash indices
unsigned index = std::distance( testValues.begin(),
std::upper_bound( testValues.begin()+hash.getIndex( energy ).first,
testValues.begin()+hash.getIndex( energy ).second,
energy )
);
if( index > 0 ) --index;
CHECK_EQUAL( 3, index);
CHECK_EQUAL( 5, hash.getIndex( 1e2 ).first );
CHECK_EQUAL( 6, hash.getIndex( 1e2 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e3 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e3 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e3 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e4 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e4 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e4 ).second );
energy = 1e5;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e6 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e6 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e6 ).second );
CHECK_EQUAL( 6, hash.getIndex( 1e7 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e7 ).second );
}
// Float counterpart of the lower-bin lookup test; identical structure,
// float-rounded expectations and the smaller 37288-byte table.
TEST( testLowerBin_float ) {
using T = float;
// 0 1 2 3 4 5 6
FloatNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, 1.0, 1e6 };
floatXSHash hash(testValues);
CHECK_EQUAL( 37288, hash.bytesize() );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 3, hash.getBinLo( 2 ) ); // 1.007800613794796e-11
CHECK_EQUAL( 3, hash.getBinLo( 3 ) ); // 1.014809243582437e-11
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).second );
T energy = 1e-13;
/* CHECK_EQUAL( 0, hash.getHashIndex(energy) ); */ // test failure in debug mode only
CHECK_EQUAL( 9.939256015445430e-12, hash.getMinValue() );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).second );
energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 1, hash.getIndex( 1e-12 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-12 ).second);
CHECK_EQUAL( 1, hash.getIndex( 1e-11 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-11 ).second);
energy = 1.00091e-11;
CHECK_EQUAL( 1, hash.getHashIndex(energy) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction(1), 1e-15 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction(2), 1e-15 );
CHECK_EQUAL( 2, hash.getIndex( energy ).first );
CHECK_EQUAL( 4, hash.getIndex( energy ).second );
// simulate cross-section lookup using hash indices
unsigned index = std::distance( testValues.begin(),
std::upper_bound( testValues.begin()+hash.getIndex( energy ).first,
testValues.begin()+hash.getIndex( energy ).second,
energy )
);
if( index > 0 ) --index;
CHECK_EQUAL( 3, index);
CHECK_EQUAL( 5, hash.getIndex( 1e2 ).first );
CHECK_EQUAL( 6, hash.getIndex( 1e2 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e3 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e3 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e3 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e4 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e4 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e4 ).second );
energy = 1e5;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e6 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e6 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e6 ).second );
CHECK_EQUAL( 6, hash.getIndex( 1e7 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e7 ).second );
}
// bytesize() is fixed by the hash-table layout, not by the input data
// (same 37304 bytes as the two-entry tables in the tests above).
// (Removed unused alias `using T = double;`.)
TEST( test_bytesize_with_larger_array ) {
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0005e-11, 1e-10, 1e-9, 1e-8, 1e-5, 1.0, 1e6 };
doubleXSHash hash(testValues);
// doesn't change based on the array size- fixed size
CHECK_EQUAL( 37304, hash.bytesize() );
}
// Data table tops out at 20, below the hash table's upper end: lookups
// above the last data point must clamp `second` to 8 (== testValues size).
// (Removed unused alias `using T = double;`.)
TEST( energies_stop_before_end_of_hashTable ) {
// 0 1 2 3 4 5 6 7
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, .1, 1.0, 20 };
doubleXSHash hash(testValues);
double energy = .5;
CHECK_EQUAL( 3555, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 3555 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 6, hash.getIndex( energy ).second );
energy = 1.5;
CHECK_EQUAL( 3713, hash.getHashIndex(energy) );
CHECK_EQUAL( 6, hash.getBinLo( 3713 ) );
CHECK_EQUAL( 6, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
energy = 20.0;
CHECK_EQUAL( 4087, hash.getHashIndex(energy) );
CHECK_EQUAL( 6, hash.getBinLo( 4087 ) );
CHECK_CLOSE( 19.9733, hash.invHashFunction(4087), 1e-4 );
CHECK_CLOSE( 20.1122, hash.invHashFunction(4088), 1e-4 );
CHECK_EQUAL( 6, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
energy = 100.0;
CHECK_EQUAL( 4319, hash.getHashIndex(energy) );
CHECK_EQUAL( 7, hash.getBinLo( 4319 ) );
CHECK_EQUAL( 7, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
energy = 1000.0;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 7, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 7, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
}
// Data table starts at 1e-9, above the hash table's lowest bins: lookups
// below the first data point must pin both indices to 0.
// (Removed unused alias `using T = double;`.)
TEST( energies_start_after_beginning_of_hashTable ) {
// 0 1 2 3
DoubleNuclearDataVec_t testValues = { 1e-9, .1, 1.0, 20 };
doubleXSHash hash(testValues);
double energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 0 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1e-11;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 0 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.1e-11;
CHECK_EQUAL( 14, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 14 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.2e-11;
CHECK_EQUAL( 27, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 27 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.0e-10;
CHECK_EQUAL( 333, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 333 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.1e-9;
CHECK_EQUAL( 679, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 679 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 1, hash.getIndex( energy ).second );
energy = 0.01;
CHECK_EQUAL( 2990, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 2990 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 1, hash.getIndex( energy ).second );
energy = 0.15;
CHECK_EQUAL( 3381, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 3381 ) );
CHECK_EQUAL( 1, hash.getIndex( energy ).first );
CHECK_EQUAL( 2, hash.getIndex( energy ).second );
}
}
} // end namespace
| 89c027661e751880aa0dada169a310df70448669.cu | #include <UnitTest++.h>
#include <vector>
#include <cmath>
#ifdef __CUDACC__
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#endif
#include "CrossSectionHash.hh"
namespace Log2Hash_tester_namespace {
using namespace MonteRay;
// Hash policy under test: bin index = floor((log2(value)+40)*100), i.e.
// logarithmically spaced bins, 100 per octave. invHashFunction maps an
// index (plus the table's minIndex offset) back to a bin energy.
class testLog2Hash {
public:
CUDA_CALLABLE_MEMBER
static size_t hashFunction(const double value) {
// For double
// log2-spaced integer bin index (this is NOT a raw bit shift)
// NOTE(review): value == 0 passes the assert but log2(0) is -inf;
// callers are expected to stay within the table's positive range.
//std::cout << "Debug -- Calling hashFunction(double)\n";
MONTERAY_ASSERT_MSG( value >= 0.0, "Negative values are not allowed.");
std::uint64_t i = (( std::log2(value)+40.0)*100.0);
return i;
}
CUDA_CALLABLE_MEMBER
static size_t hashFunction(const float value) {
// For float
// log2-spaced integer bin index, evaluated in float arithmetic
//std::cout << "Debug -- Calling hashFunction(float)\n";
MONTERAY_ASSERT_MSG( value >= 0.0f, "Negative values are not allowed.");
std::uint64_t i = (( std::log2(value)+40.0f)*100.0f);
return i;
}
// double overload, selected when TV is 8 bytes wide
template < typename TV, typename std::enable_if< sizeof(TV) == 8 >::type* = nullptr >
CUDA_CALLABLE_MEMBER
static
TV invHashFunction( const size_t index, const size_t minIndex = 0 ) {
//std::cout << "Debug -- Calling invHashFunction(index)->double\n";
TV value = std::exp2( ((index + minIndex) / 100.0 ) - 40.0 );
return value;
}
// float overload, selected when TV is 4 bytes wide
template < typename TV, typename std::enable_if< sizeof(TV) == 4 >::type* = nullptr >
CUDA_CALLABLE_MEMBER
static
TV invHashFunction( const size_t index, const size_t minIndex = 0 ) {
//std::cout << "Debug -- Calling invHashFunction(index)->float\n";
TV value = std::exp2( ((index + minIndex) / 100.0f ) - 40.0f );
return value;
}
};
SUITE( Log2Hash_tester ) {
using doubleXSHash = CrossSectionHash_t<double, testLog2Hash>;
using floatXSHash = CrossSectionHash_t<float, testLog2Hash>;
typedef doubleXSHash::NuclearData_t DoubleNuclearData_t;
typedef std::vector<double> DoubleNuclearDataVec_t;
typedef doubleXSHash::Index_t DoubleIndex_t;
typedef floatXSHash::NuclearData_t FloatNuclearData_t;
typedef std::vector<float> FloatNuclearDataVec_t;
typedef floatXSHash::Index_t FloatIndex_t;
// Construction from a double-valued table: index range and table size are
// fixed by the hash policy, independent of the data handed in.
// (Removed unused locals `min`/`max` - their values were never checked.)
TEST( ctor_w_double ) {
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
// spot-check the hash formula at the table edges
CHECK_EQUAL( 345, size_t( (std::log2( 1e-11 ) + 40)*100 ) );
CHECK_EQUAL( 678, size_t( (std::log2( 1e-10 ) + 40)*100 ) );
CHECK_EQUAL( 4996, size_t( (std::log2( 1e+3 ) + 40)*100 ) );
CHECK_EQUAL( 4996-345 + 1, hash.size() );
CHECK_EQUAL( 4652, hash.size() );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
//printf( "Debug exp2( ( ( 0 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 0 + 345) / 100.0) - 40.0 ) );
CHECK_EQUAL( 9.939251007413245e-12 , testLog2Hash::invHashFunction<double>( 0, 345 ) );
CHECK_EQUAL( 9.939251007413245e-12 , hash.invHashFunction( 0 ) );
}
// Construction from a float-valued table.
// Bug fix: this test is named for the float instantiation but previously
// constructed a doubleXSHash; use floatXSHash (same fixed size, 4652 -
// matching the value asserted in getHashIndex_float).
TEST( ctor_w_float ) {
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
CHECK_EQUAL( 4652, hash.size() );
}
// Characterisation of the double instantiation: min/max bin values,
// hash/inverse-hash round-trips, and the fixed bytesize.
TEST( getHashIndex_double ) {
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
CHECK_EQUAL( 9.939251007413245e-12 , hash.getMinValue() );
CHECK_EQUAL( 345, doubleXSHash::hashFunction( hash.getMinValue()) );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 0, hash.getHashIndex( hash.getMinValue() ) );
CHECK_EQUAL( 9.939251007413245e-12, hash.invHashFunction( 0 ) );
//printf( "Debug exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) );
CHECK_EQUAL( 9.959986661501810e+02, hash.getMaxValue() );
CHECK_EQUAL( 4996, doubleXSHash::hashFunction( hash.getMaxValue()) );
CHECK_EQUAL( 4996, hash.getMaxIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
CHECK_EQUAL( 9.959986661501810e+02, hash.invHashFunction( hash.getNumIndices() -1 ));
// printf( "Debug: hash.invHashFunction( 1 ) = %20.15e \n", hash.invHashFunction( 1 ) );
// printf( "Debug: hash.invHashFunction( 2 ) = %20.15e \n", hash.invHashFunction( 2 ) );
// printf( "Debug: hash.invHashFunction( 3 ) = %20.15e \n", hash.invHashFunction( 3 ) );
CHECK_CLOSE( 1.000838396532159e-11, hash.invHashFunction( 1 ), 1e-26 );
CHECK_CLOSE( 1.007799778097923e-11, hash.invHashFunction( 2 ), 1e-26);
CHECK_CLOSE( 1.014809579901632e-11, hash.invHashFunction( 3 ), 1e-26 );
CHECK_EQUAL( 37304, hash.bytesize() );
}
// Characterisation of the float instantiation: same index range as the
// double version but float-rounded bin values and a smaller bytesize.
TEST( getHashIndex_float ) {
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
CHECK_EQUAL( 4652, hash.size() );
//printf( "Debug exp2( ( ( 0 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 0 + 345) / 100.0f) - 40.0f ) );
CHECK_CLOSE( 9.939256015445430e-12 , hash.getMinValue(), 1e-26 );
CHECK_EQUAL( 345, floatXSHash::hashFunction( hash.getMinValue()) );
CHECK_EQUAL( 345, hash.getMinIndex() );
CHECK_EQUAL( 0, hash.getHashIndex( hash.getMinValue() ) );
CHECK_CLOSE( 9.939256015445430e-12 , hash.invHashFunction( 0 ), 1e-26 );
//printf( "Debug exp2( ( ( 4651 + 345) / 100.0) - 40.0 ) = %20.15e \n", std::exp2( ( ( 4651 + 345) / 100.0f) - 40.0f ) );
CHECK_EQUAL( 9.959980468750000e+02, hash.getMaxValue() );
CHECK_EQUAL( 4996, floatXSHash::hashFunction( 1e+3 ) );
CHECK_EQUAL( 4996, floatXSHash::hashFunction( hash.getMaxValue()) );
CHECK_EQUAL( 4996, hash.getMaxIndex() );
CHECK_EQUAL( 4651, hash.getHashIndex( hash.getMaxValue()) );
CHECK_EQUAL( 9.959980468750000e+02, hash.invHashFunction( hash.getNumIndices() -1 ));
//printf( "Debug: hash.invHashFunction( 0 ) = %20.15e \n", hash.invHashFunction( 0 ) );
//printf( "Debug: hash.invHashFunction( 1 ) = %20.15e \n", hash.invHashFunction( 1 ) );
//printf( "Debug: hash.invHashFunction( 2 ) = %20.15e \n", hash.invHashFunction( 2 ) );
//printf( "Debug: hash.invHashFunction( 3 ) = %20.15e \n", hash.invHashFunction( 3 ) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction( 1 ), 1e-26 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction( 2 ), 1e-26);
CHECK_CLOSE( 1.014809243582437e-11, hash.invHashFunction( 3 ), 1e-26 );
CHECK_EQUAL( 37288, hash.bytesize() );
}
// Every table bin must map back to a "normal" double (no zeros, denormals,
// infs or NaNs across the whole index range).
// (Test-name typo "eneries" kept: renaming would change the registered test.)
TEST( test_all_bin_eneries_are_valid_double ) {
using T = double;
DoubleNuclearDataVec_t testValues = { 1.0, 2.0};
doubleXSHash hash(testValues);
for( unsigned i=0; i< hash.size(); ++i ){
T value = hash.invHashFunction( i );
CHECK_EQUAL( true, std::isnormal(value) );
}
}
// Float counterpart of the bin-validity sweep: every bin energy must be a
// normal float.
// (Test-name typo "eneries" kept: renaming would change the registered test.)
TEST( test_all_bin_eneries_are_valid_float ) {
using T = float;
FloatNuclearDataVec_t testValues = { 1.0, 2.0};
floatXSHash hash(testValues);
for( unsigned i=0; i< hash.size(); ++i ){
T value = hash.invHashFunction( i );
CHECK_EQUAL( true, std::isnormal(value) );
}
}
// Lower-bin lookup for the double instantiation: getIndex(E) returns a
// (first,second) pair bracketing E in the data table, suitable for a
// std::upper_bound search (demonstrated mid-test).
TEST( testLowerBin_double ) {
using T = double;
// 0 1 2 3 4 5 6
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, 1.0, 1e6 };
doubleXSHash hash(testValues);
CHECK_EQUAL( 37304, hash.bytesize() );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 3, hash.getBinLo( 2 ) ); // 1.007800613794796e-11
CHECK_EQUAL( 3, hash.getBinLo( 3 ) ); // 1.014809243582437e-11
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).second );
T energy = 1e-13;
/* CHECK_EQUAL( 0, hash.getHashIndex(energy) ); // test failure in debug mode only */
CHECK_EQUAL( 9.939251007413245e-12, hash.getMinValue() );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).second );
energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 1, hash.getIndex( 1e-12 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-12 ).second);
CHECK_EQUAL( 1, hash.getIndex( 1e-11 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-11 ).second);
energy = 1.00091e-11;
CHECK_EQUAL( 1, hash.getHashIndex(energy) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction(1), 1e-15 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction(2), 1e-15 );
CHECK_EQUAL( 2, hash.getIndex( energy ).first );
CHECK_EQUAL( 4, hash.getIndex( energy ).second );
// simulate cross-section lookup using hash indices
unsigned index = std::distance( testValues.begin(),
std::upper_bound( testValues.begin()+hash.getIndex( energy ).first,
testValues.begin()+hash.getIndex( energy ).second,
energy )
);
if( index > 0 ) --index;
CHECK_EQUAL( 3, index);
CHECK_EQUAL( 5, hash.getIndex( 1e2 ).first );
CHECK_EQUAL( 6, hash.getIndex( 1e2 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e3 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e3 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e3 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e4 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e4 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e4 ).second );
energy = 1e5;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e6 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e6 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e6 ).second );
CHECK_EQUAL( 6, hash.getIndex( 1e7 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e7 ).second );
}
TEST( testLowerBin_float ) {
using T = float;
// 0 1 2 3 4 5 6
FloatNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, 1.0, 1e6 };
floatXSHash hash(testValues);
CHECK_EQUAL( 37288, hash.bytesize() );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 3, hash.getBinLo( 2 ) ); // 1.007800613794796e-11
CHECK_EQUAL( 3, hash.getBinLo( 3 ) ); // 1.014809243582437e-11
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-14 ).second );
T energy = 1e-13;
/* CHECK_EQUAL( 0, hash.getHashIndex(energy) ); */ // test failure in debug mode only
CHECK_EQUAL( 9.939256015445430e-12, hash.getMinValue() );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).first );
CHECK_EQUAL( 0, hash.getIndex( 1e-13 ).second );
energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 0 ) ); // 9.939256015445430e-12
CHECK_EQUAL( 2, hash.getBinLo( 1 ) ); // 1.000837780706920e-11
CHECK_EQUAL( 1, hash.getIndex( 1e-12 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-12 ).second);
CHECK_EQUAL( 1, hash.getIndex( 1e-11 ).first);
CHECK_EQUAL( 3, hash.getIndex( 1e-11 ).second);
energy = 1.00091e-11;
CHECK_EQUAL( 1, hash.getHashIndex(energy) );
CHECK_CLOSE( 1.000837780706920e-11, hash.invHashFunction(1), 1e-15 );
CHECK_CLOSE( 1.007800613794796e-11, hash.invHashFunction(2), 1e-15 );
CHECK_EQUAL( 2, hash.getIndex( energy ).first );
CHECK_EQUAL( 4, hash.getIndex( energy ).second );
// simulate cross-section lookup using hash indices
unsigned index = std::distance( testValues.begin(),
std::upper_bound( testValues.begin()+hash.getIndex( energy ).first,
testValues.begin()+hash.getIndex( energy ).second,
energy )
);
if( index > 0 ) --index;
CHECK_EQUAL( 3, index);
CHECK_EQUAL( 5, hash.getIndex( 1e2 ).first );
CHECK_EQUAL( 6, hash.getIndex( 1e2 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e3 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e3 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e3 ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e4 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e4 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e4 ).second );
energy = 1e5;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
CHECK_EQUAL( 4651, hash.getHashIndex( 1e6 ) );
CHECK_EQUAL( 5, hash.getIndex( 1e6 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e6 ).second );
CHECK_EQUAL( 6, hash.getIndex( 1e7 ).first );
CHECK_EQUAL( 7, hash.getIndex( 1e7 ).second );
}
TEST( test_bytesize_with_larger_array ) {
using T = double;
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0005e-11, 1e-10, 1e-9, 1e-8, 1e-5, 1.0, 1e6 };
doubleXSHash hash(testValues);
// doesn't change based on the array size- fixed size
CHECK_EQUAL( 37304, hash.bytesize() );
}
TEST( energies_stop_before_end_of_hashTable ) {
using T = double;
// 0 1 2 3 4 5 6 7
DoubleNuclearDataVec_t testValues = { 1e-13, 5e-13, 1e-11, 1.0009e-11, 1e-10, .1, 1.0, 20 };
doubleXSHash hash(testValues);
double energy = .5;
CHECK_EQUAL( 3555, hash.getHashIndex(energy) );
CHECK_EQUAL( 5, hash.getBinLo( 3555 ) );
CHECK_EQUAL( 5, hash.getIndex( energy ).first );
CHECK_EQUAL( 6, hash.getIndex( energy ).second );
energy = 1.5;
CHECK_EQUAL( 3713, hash.getHashIndex(energy) );
CHECK_EQUAL( 6, hash.getBinLo( 3713 ) );
CHECK_EQUAL( 6, hash.getIndex( energy ).first );
CHECK_EQUAL( 7, hash.getIndex( energy ).second );
energy = 20.0;
CHECK_EQUAL( 4087, hash.getHashIndex(energy) );
CHECK_EQUAL( 6, hash.getBinLo( 4087 ) );
CHECK_CLOSE( 19.9733, hash.invHashFunction(4087), 1e-4 );
CHECK_CLOSE( 20.1122, hash.invHashFunction(4088), 1e-4 );
CHECK_EQUAL( 6, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
energy = 100.0;
CHECK_EQUAL( 4319, hash.getHashIndex(energy) );
CHECK_EQUAL( 7, hash.getBinLo( 4319 ) );
CHECK_EQUAL( 7, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
energy = 1000.0;
CHECK_EQUAL( 4651, hash.getHashIndex(energy) );
CHECK_EQUAL( 7, hash.getBinLo( 4651 ) );
CHECK_EQUAL( 7, hash.getIndex( energy ).first );
CHECK_EQUAL( 8, hash.getIndex( energy ).second );
}
TEST( energies_start_after_beginning_of_hashTable ) {
using T = double;
// 0 1 2 3
DoubleNuclearDataVec_t testValues = { 1e-9, .1, 1.0, 20 };
doubleXSHash hash(testValues);
double energy = 1e-12;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 0 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1e-11;
CHECK_EQUAL( 0, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 0 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.1e-11;
CHECK_EQUAL( 14, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 14 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.2e-11;
CHECK_EQUAL( 27, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 27 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.0e-10;
CHECK_EQUAL( 333, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 333 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 0, hash.getIndex( energy ).second );
energy = 1.1e-9;
CHECK_EQUAL( 679, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 679 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 1, hash.getIndex( energy ).second );
energy = 0.01;
CHECK_EQUAL( 2990, hash.getHashIndex(energy) );
CHECK_EQUAL( 0, hash.getBinLo( 2990 ) );
CHECK_EQUAL( 0, hash.getIndex( energy ).first );
CHECK_EQUAL( 1, hash.getIndex( energy ).second );
energy = 0.15;
CHECK_EQUAL( 3381, hash.getHashIndex(energy) );
CHECK_EQUAL( 1, hash.getBinLo( 3381 ) );
CHECK_EQUAL( 1, hash.getIndex( energy ).first );
CHECK_EQUAL( 2, hash.getIndex( energy ).second );
}
}
} // end namespace
|
507213c1c12f8726407fb4c16d38cf954ec71ac6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void solveFull ( double* data, double* inv1, double* inv2, const int nx, const int nBatch )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set values to last two entries in array
double oldNx2 = data[(nx - 2) * nBatch + globalIdx]; // Two points from end
double oldNx1 = data[(nx - 1) * nBatch + globalIdx]; // One point from end
// Set index being computed
int index = globalIdy * nBatch + globalIdx;
if (globalIdy < nx - 2)
{
data[index] = data[index] - (inv1[index] * oldNx2 + inv2[index] * oldNx1);
}
} | 507213c1c12f8726407fb4c16d38cf954ec71ac6.cu | #include "includes.h"
__global__ static void solveFull ( double* data, double* inv1, double* inv2, const int nx, const int nBatch )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set values to last two entries in array
double oldNx2 = data[(nx - 2) * nBatch + globalIdx]; // Two points from end
double oldNx1 = data[(nx - 1) * nBatch + globalIdx]; // One point from end
// Set index being computed
int index = globalIdy * nBatch + globalIdx;
if (globalIdy < nx - 2)
{
data[index] = data[index] - (inv1[index] * oldNx2 + inv2[index] * oldNx1);
}
} |
0d12065de09b79aaeb4a181006b28815e5938bf9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "conv.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *t = NULL;
hipMalloc(&t, XSIZE*YSIZE);
float *tk = NULL;
hipMalloc(&tk, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int t_rows = 1;
int t_columns = 1;
int n_channels = 1;
int k_rows = 1;
int k_columns = 1;
int n_kernels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
conv), dim3(gridBlock),dim3(threadBlock), 0, 0, t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
conv), dim3(gridBlock),dim3(threadBlock), 0, 0, t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
conv), dim3(gridBlock),dim3(threadBlock), 0, 0, t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0d12065de09b79aaeb4a181006b28815e5938bf9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "conv.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *t = NULL;
cudaMalloc(&t, XSIZE*YSIZE);
float *tk = NULL;
cudaMalloc(&tk, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int t_rows = 1;
int t_columns = 1;
int n_channels = 1;
int k_rows = 1;
int k_columns = 1;
int n_kernels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
conv<<<gridBlock,threadBlock>>>(t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
conv<<<gridBlock,threadBlock>>>(t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
conv<<<gridBlock,threadBlock>>>(t,tk,out,t_rows,t_columns,n_channels,k_rows,k_columns,n_kernels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
44959a0b06e90d9d2291d2d60c2a4542cce225b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reduction(const int N, float *a, float *result) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
int gridSize = gridDim.x;
//unique global thread ID
int id = thread + block*blockSize;
__volatile__ __shared__ float s_sum[256];
float sum = 0;
for (int i=0; i<4; i++){
if(id+i*blockSize*gridSize<N){
sum += a[id+i*blockSize*gridSize]; //add the thread's id to start
}
}
s_sum[thread] = sum;
__syncthreads(); //make sure the write to shared is finished
if (thread<128) {//first half
s_sum[thread] += s_sum[thread+128];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<64) {//next half
s_sum[thread] += s_sum[thread+64];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<32) {//next half
s_sum[thread] += s_sum[thread+32];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<16) {//next half
s_sum[thread] += s_sum[thread+16];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<8) {//next half
s_sum[thread] += s_sum[thread+8];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<4) {//next half
s_sum[thread] += s_sum[thread+4];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<2) {//next half
s_sum[thread] += s_sum[thread+2];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<1) {//final piece
s_sum[thread] += s_sum[thread+1];
result[block] = s_sum[thread];
}
} | 44959a0b06e90d9d2291d2d60c2a4542cce225b2.cu | #include "includes.h"
__global__ void reduction(const int N, float *a, float *result) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
int gridSize = gridDim.x;
//unique global thread ID
int id = thread + block*blockSize;
__volatile__ __shared__ float s_sum[256];
float sum = 0;
for (int i=0; i<4; i++){
if(id+i*blockSize*gridSize<N){
sum += a[id+i*blockSize*gridSize]; //add the thread's id to start
}
}
s_sum[thread] = sum;
__syncthreads(); //make sure the write to shared is finished
if (thread<128) {//first half
s_sum[thread] += s_sum[thread+128];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<64) {//next half
s_sum[thread] += s_sum[thread+64];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<32) {//next half
s_sum[thread] += s_sum[thread+32];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<16) {//next half
s_sum[thread] += s_sum[thread+16];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<8) {//next half
s_sum[thread] += s_sum[thread+8];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<4) {//next half
s_sum[thread] += s_sum[thread+4];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<2) {//next half
s_sum[thread] += s_sum[thread+2];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<1) {//final piece
s_sum[thread] += s_sum[thread+1];
result[block] = s_sum[thread];
}
} |
5c12fb6904811e4275b8508d41bf4b8cbb35d53d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
__global__
void add(int n, float *x, float *y, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
z[i] = x[i] + y[i];
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
double timeRet=(double)tp.tv_sec + (double)tp.tv_usec*1e-6;
return timeRet;
}
int main(int argc, char **argv)
{
int N = 1 << 24;
printf("N is %d\n", N);
float*x = (float*)malloc(N*sizeof(float));
float*y = (float*)malloc(N*sizeof(float));
float*z = (float*)malloc(N*sizeof(float));
float *zcpu = (float*)malloc(N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = i;
y[i] = 42-i;
}
#if 0
double cpuStart=cpuSecond();
for (int i = 0; i < N; i++) {
zcpu[i] = x[i] + y[i];
}
double cpuEnd=cpuSecond();
double cpuElapse = cpuEnd - cpuStart;
printf("Time take on CPU %f s\n", cpuElapse);
#endif
// note the reference of a *pointer*!
float *x_c, *y_c, *z_c;
hipMalloc(&x_c, N*sizeof(float));
hipMalloc(&y_c, N*sizeof(float));
hipMalloc(&z_c, N*sizeof(float));
// copy host memory over to GPU
hipMemcpy(x_c, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_c, y, N*sizeof(float), hipMemcpyHostToDevice);
// add vectors together
int Nthreads = atoi(argv[1]);
dim3 threadsPerBlock(Nthreads,1,1);
dim3 blocks((N+Nthreads-1)/Nthreads,1,1);
double iStart=cpuSecond();
hipLaunchKernelGGL(( add) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, N, x_c, y_c, z_c);
// copy result z_c back to CPU (in z)
hipMemcpy(z, z_c, N*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
double iStop=cpuSecond();
// check result
double elapsed=iStop - iStart;
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(z[i]-42.f));
printf("Max error: %f\n", maxError);
printf("AddVec Kernel on GPU with <<<%d, %d>>> and Elapsed time is %f sec \n", blocks.x, threadsPerBlock.x, elapsed);
// free memory on both CPU and GPU
hipFree(x_c);
hipFree(y_c);
free(x);
free(y);
}
| 5c12fb6904811e4275b8508d41bf4b8cbb35d53d.cu | #include <stdio.h>
#include <sys/time.h>
__global__
void add(int n, float *x, float *y, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
z[i] = x[i] + y[i];
}
}
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp, NULL);
double timeRet=(double)tp.tv_sec + (double)tp.tv_usec*1e-6;
return timeRet;
}
int main(int argc, char **argv)
{
int N = 1 << 24;
printf("N is %d\n", N);
float*x = (float*)malloc(N*sizeof(float));
float*y = (float*)malloc(N*sizeof(float));
float*z = (float*)malloc(N*sizeof(float));
float *zcpu = (float*)malloc(N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = i;
y[i] = 42-i;
}
#if 0
double cpuStart=cpuSecond();
for (int i = 0; i < N; i++) {
zcpu[i] = x[i] + y[i];
}
double cpuEnd=cpuSecond();
double cpuElapse = cpuEnd - cpuStart;
printf("Time take on CPU %f s\n", cpuElapse);
#endif
// note the reference of a *pointer*!
float *x_c, *y_c, *z_c;
cudaMalloc(&x_c, N*sizeof(float));
cudaMalloc(&y_c, N*sizeof(float));
cudaMalloc(&z_c, N*sizeof(float));
// copy host memory over to GPU
cudaMemcpy(x_c, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_c, y, N*sizeof(float), cudaMemcpyHostToDevice);
// add vectors together
int Nthreads = atoi(argv[1]);
dim3 threadsPerBlock(Nthreads,1,1);
dim3 blocks((N+Nthreads-1)/Nthreads,1,1);
double iStart=cpuSecond();
add <<< blocks, threadsPerBlock >>> (N, x_c, y_c, z_c);
// copy result z_c back to CPU (in z)
cudaMemcpy(z, z_c, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
double iStop=cpuSecond();
// check result
double elapsed=iStop - iStart;
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(z[i]-42.f));
printf("Max error: %f\n", maxError);
printf("AddVec Kernel on GPU with <<<%d, %d>>> and Elapsed time is %f sec \n", blocks.x, threadsPerBlock.x, elapsed);
// free memory on both CPU and GPU
cudaFree(x_c);
cudaFree(y_c);
free(x);
free(y);
}
|
3afc8ec52e0bcac102f1a18d27feb105b5fb3b73.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out = thrust::device_vector<int>(n, 0);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
| 3afc8ec52e0bcac102f1a18d27feb105b5fb3b73.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out = thrust::device_vector<int>(n, 0);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
|
d68f62c0e68c9b574a535c6c875ccd6d834738f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| d68f62c0e68c9b574a535c6c875ccd6d834738f8.cu | /*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
// Per-(n, s) inner product of data_1 and data_2 along the channel axis;
// result written to channel_dot (num * spatial_dim entries).
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
    const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int base = n * channels * spatial_dim + s;
    Dtype dot = 0;
    for (int c = 0; c < channels; ++c) {
      const int off = base + c * spatial_dim;
      dot += data_1[off] * data_2[off];
    }
    channel_dot[index] = dot;
  }
}
// Numerically-stable softmax along softmax_axis_, computed on the GPU:
//   top = exp(bottom - channel_max) / channel_sum(exp(...)).
// scale_ is a member scratch blob holding one value per (outer, spatial)
// location; kernel launch order is load-bearing (max -> subtract -> exp
// -> sum -> divide, each operating on top_data in place).
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
// Work entirely in top_data so bottom_data stays untouched.
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
// Softmax backward pass on the GPU:
//   bottom_diff = (top_diff - dot(top_diff, top_data) per (n, s)) * top_data.
// scale_ holds the per-(outer, spatial) dot products between launches.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
// Start from top_diff; the dot product is subtracted from it in place.
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
27f026f76d3041a2d3b06b7325a753295e57ffbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* PVM_single_c.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
fsl@fmrib.ox.ac.uk
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 6.0 (c) 2018, The University of
Oxford (the "Software")
The Software remains the property of the Oxford University Innovation
("the University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
fsl@innovation.ox.ac.uk quoting Reference Project 9564, FSL.*/
#include "diffmodels_utils.h"
#include "levenberg_marquardt.cu"
#include "options.h"
#include <fstream>
/////////////////////////////////////
/////////////////////////////////////
/// PVM_single_c ///
/////////////////////////////////////
/////////////////////////////////////
// Isotropic compartment signal for data point pt: exp(-b * d).
// `_d` is a pointer so the value can live in shared memory;
// the original's `-bvals[pt]**_d` parses as -bvals[pt] * (*_d).
__device__
inline float isoterm_PVM_single_c(const int pt,const float* _d,const float *bvals){
  const float d = *_d;
  return exp(-bvals[pt] * d);
}
// Derivative of the isotropic term w.r.t. lambda, where d = lambda^2:
// d/dlambda exp(-b*lambda^2) = -2*b*lambda*exp(-b*lambda^2).
__device__
inline float isoterm_lambda_PVM_single_c(const int pt,const float lambda,const float *bvals){
  const float b = bvals[pt];
  return -2 * b * lambda * exp(-b * lambda * lambda);
}
// Anisotropic (stick) compartment signal: exp(-b * d * (g . x)^2),
// where g is gradient direction pt taken from the column-major bvecs
// layout (row r at bvecs[r*ndirections + pt]) and x is the fibre direction.
__device__
inline float anisoterm_PVM_single_c(const int pt,const float* _d,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
  const float dp = bvecs[pt]*x.x + bvecs[ndirections+pt]*x.y + bvecs[(2*ndirections)+pt]*x.z;
  return exp(-bvals[pt] * (*_d) * dp * dp);
}
// Derivative of the anisotropic term w.r.t. lambda (d = lambda^2):
// -2*b*lambda*dp^2 * exp(-b*lambda^2*dp^2), dp = g . x.
__device__
inline float anisoterm_lambda_PVM_single_c(const int pt,const float lambda,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
  const float dp = bvecs[pt]*x.x + bvecs[ndirections+pt]*x.y + bvecs[(2*ndirections)+pt]*x.z;
  return -2 * bvals[pt] * lambda * dp * dp * exp(-bvals[pt] * lambda * lambda * dp * dp);
}
// Derivative of the anisotropic term w.r.t. the polar angle _th, with
// x = (sin th cos ph, sin th sin ph, cos th) and dp = g . x:
// chain rule gives -2*b*d*dp*(g . dx/dth) * exp(-b*d*dp^2).
__device__
inline float anisoterm_th_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
  float sinth,costh,sinph,cosph;
  sincos(_th,&sinth,&costh);
  sincos(_ph,&sinph,&cosph);
  const float gx = bvecs[pt];
  const float gy = bvecs[ndirections+pt];
  const float gz = bvecs[(2*ndirections)+pt];
  const float dp  = gx*x.x + gy*x.y + gz*x.z;
  // g . d(x)/d(th)
  const float dp1 = costh*(gx*cosph + gy*sinph) - gz*sinth;
  return -2 * bvals[pt] * (*_d) * dp * dp1 * exp(-bvals[pt] * (*_d) * dp * dp);
}
// Derivative of the anisotropic term w.r.t. the azimuthal angle _ph:
// -2*b*d*dp*(g . dx/dph) * exp(-b*d*dp^2), dp = g . x.
__device__
inline float anisoterm_ph_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
  float sinth,sinph,cosph;
  sinth=sin(_th);
  sincos(_ph,&sinph,&cosph);
  const float gx = bvecs[pt];
  const float gy = bvecs[ndirections+pt];
  const float gz = bvecs[(2*ndirections)+pt];
  const float dp  = gx*x.x + gy*x.y + gz*x.z;
  // g . d(x)/d(ph); the z component of x does not depend on ph.
  const float dp1 = sinth*(-gx*sinph + gy*cosph);
  return -2 * bvals[pt] * (*_d) * dp * dp1 * exp(-bvals[pt] * (*_d) * dp * dp);
}
//If the sum of the fractions is >1, then zero as many fractions
//as necessary, so that the sum becomes smaller than 1.
//in diffmodel.cc
// If the volume fractions sum to >= 1, clamp the offending fraction and
// every one after it to (almost) zero so the total stays below 1.
__device__ void fix_fsum_PVM_single_c( //INPUT
          int nfib,
          //INPUT - OUTPUT){
          float *fs)
{
  float running = 0.0f;
  for (int i = 0; i < nfib; i++) {
    running += fs[i];
    if (running >= 1) {
      // Zero out this fraction and all remaining ones, then stop.
      for (int j = i; j < nfib; j++) {
        fs[j] = FSMALL_gpu; //make the fraction almost zero
      }
      break;
    }
  }
}
//in diffmodel.cc
// Bubble-sort the nfib (f, th, ph) triples stored at params[2..] into
// descending order of the volume fraction f (params[2 + 3*j]).
__device__ void sort_PVM_single_c(int nfib,float* params)
{
  for (int pass = 1; pass < nfib; pass++) {
    for (int j = 0; j < nfib - pass; j++) {
      const int a = 2 + 3*j;   // current (f, th, ph) triple
      const int b = a + 3;     // next triple
      if (params[a] < params[b]) {
        // Swap whole triples so each f keeps its own angles.
        const float f  = params[a];
        const float th = params[a+1];
        const float ph = params[a+2];
        params[a]   = params[b];
        params[a+1] = params[b+1];
        params[a+2] = params[b+2];
        params[b]   = f;
        params[b+1] = th;
        params[b+2] = ph;
      }
    }
  }
}
// Derivatives of the actual volume fractions fs[j] w.r.t. the transformed
// fraction parameter of fibre k (fs are defined recursively:
// fs[j] = beta2f(params) * (1 - sum of earlier fs)), written into the
// nfib x nfib matrix Deriv with Deriv[j*nfib + k] = d fs[j] / d param_k.
// Each thread handles one column k = idSubVOX % nfib; callers invoke it
// with idSubVOX < nfib so every column is covered exactly once.
__device__ void fractions_deriv_PVM_single_c( //INPUT
const float* params,
const float* fs,
const int nfib,
const int idSubVOX,
//OUTPUT
float* Deriv)
{
int nparams_per_fibre=3;
float fsum;
int k=idSubVOX%nfib;
// Clear this thread's column before accumulating.
for (int j=0; j<nfib; j++){
Deriv[j*nfib+k]=0;
}
int kk = 2+(k*nparams_per_fibre);
// d beta2f / d param for fibre k; beta2f presumably uses sin^2 — TODO confirm.
float sinparamkk = sin(2*params[kk]);
for (int j=0; j<nfib; j++){
int jj = 2+(j*nparams_per_fibre);
if (j==k){
// Diagonal term: derivative through fibre k's own parameter,
// scaled by the remaining fraction budget (1 - sum of earlier fs).
fsum=1;
for (int n=0; n<=(j-1); n++){
fsum-=fs[n];
}
Deriv[j*nfib+k]=sinparamkk*fsum;
}else if (j>k){
// Later fibres depend on fibre k only through the shrinking
// budget, i.e. through the derivatives already accumulated above.
float sinparam = sin(params[jj]);
fsum=0;
for (int n=0; n<=(j-1); n++){
fsum+=Deriv[n*nfib+k];
}
Deriv[j*nfib+k]= -(sinparam*sinparam)*fsum;
}
// j < k: fs[j] does not depend on param_k; entry stays 0.
}
}
//cost function PVM_single_c
// Cost function (sum of squared residuals) of the PVM_single_c model for
// one voxel, evaluated cooperatively by one thread block. Directions are
// strided across THREADS_BLOCK_FIT threads; partial sums land in the
// shared `reduction` array and thread 0 folds them into *cfv.
// All starred pointers (fs, x, _d, sumf, cfv) live in shared memory.
__device__ void cf_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
double* cfv)
{
// Threads 0..nfib-1 each convert one fibre's (th, ph) to a unit vector.
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0 serially builds the fractions (each depends on the previous).
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*cfv = 0.0;
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
}
// Each thread covers ceil/floor share of the directions.
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
float err;
float3 x2;
int dir_iter=idSubVOX;
// Barrier: shared fs/x/_d/sumf must be complete before any thread reads them.
__syncthreads();
reduction[idSubVOX]=0;
for(int dir=0;dir<ndir;dir++){
err = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
err += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
err= (params[0]*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,bvals))+err))-mdata[dir_iter];
}else{
err = params[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,bvals)+err)-mdata[dir_iter];
}
reduction[idSubVOX]+= err*err;
dir_iter+=THREADS_BLOCK_FIT;
}
// Barrier: all partial sums written before thread 0 accumulates them.
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
*cfv+=reduction[i];
}
}
}
//gradient function PVM_single_c
// Gradient of the PVM_single_c cost function for one voxel, computed
// cooperatively by one thread block. Each thread builds the Jacobian row
// for its data points in its slice of shared J (myJ), then all threads
// reduce 2 * J^T * residual into `grad` via the shared `reduction` array.
// The slots myJ[2+3k+1] are temporarily reused to cache the anisotropic
// terms before being overwritten with the th-derivatives (see comments).
__device__ void grad_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* grad)
{
// Threads 0..nfib-1 convert each fibre's (th, ph) to a unit vector.
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0 serially builds fractions and zeroes the gradient.
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++) grad[p]=0;
}
// Barrier: fs must be complete before fractions_deriv reads it.
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
// ndir: this thread's share of directions; max_dir: loop bound shared by
// ALL threads so the __syncthreads() inside the loop stays uniform.
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
// NOTE(review): `diff` is only assigned inside `if(dir<ndir)`; a thread
// with no data points uses it below while myJ[p]==0 — harmless unless the
// register holds inf/NaN (0*NaN = NaN). Confirm against upstream FSL.
float diff;
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
// Stash anisotropic terms in myJ[2+3k+1] (overwritten later).
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
// Accumulate predicted signal, d/dlambda, and fraction derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
myJ[kk] = 0;
for (int j=0;j<nfib;j++){
if(f_deriv[j*nfib+k]!=0){
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+j*3+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
}
// Now replace the cached aniso terms with the th/ph derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig=params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
diff = sig - mdata[dir_iter];
myJ[0] = sig/params[0];
}
// Block-wide reduction of 2*J^T*diff, one parameter at a time.
for (int p=0;p<nparams;p++){
reduction[idSubVOX]=2*myJ[p]*diff;
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
grad[p] += reduction[i];
}
}
__syncthreads();
}
dir_iter+=THREADS_BLOCK_FIT;
}
}
//hessian function PVM_single_c
// Gauss-Newton approximation of the Hessian (2 * J^T * J) of the
// PVM_single_c cost function for one voxel, computed cooperatively by one
// thread block. Jacobian rows are built exactly as in grad_PVM_single_c
// (myJ[2+3k+1] temporarily caches the anisotropic terms); only the upper
// triangle is accumulated, then mirrored into the lower triangle at the end.
__device__ void hess_PVM_single_c( //INPUT
const float* params,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* hess)
{
// Threads 0..nfib-1 convert each fibre's (th, ph) to a unit vector.
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0 serially builds fractions and zeroes the Hessian.
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++){
for (int p2=0;p2<nparams;p2++){
hess[p*nparams+p2] = 0;
}
}
}
// Barrier: fs must be complete before fractions_deriv reads it.
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
// ndir: this thread's share; max_dir: uniform loop bound for barriers.
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
// Stash anisotropic terms in myJ[2+3k+1] (overwritten later).
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
for (int j=0; j<nfib; j++){
if (f_deriv[j*nfib+k]!=0)
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+3*j+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
// Replace the cached aniso terms with the th/ph derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig= params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
myJ[0] = sig/params[0];
}
// Upper-triangle reduction of 2 * J^T * J, one (p, p2) pair at a time.
for (int p=0;p<nparams;p++){
for (int p2=p;p2<nparams;p2++){
reduction[idSubVOX]=2*(myJ[p]*myJ[p2]);
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
hess[p*nparams+p2] += reduction[i];
}
}
__syncthreads();
}
}
dir_iter+=THREADS_BLOCK_FIT;
}
// Mirror the upper triangle into the lower triangle (symmetric matrix).
if(idSubVOX==0){
for (int j=0; j<nparams; j++) {
for (int i=j+1; i<nparams; i++) {
hess[i*nparams+j]=hess[j*nparams+i];
}
}
}
}
//in diffmodel.cc
// Levenberg-Marquardt fit of the PVM_single_c model.
// Launch layout: one block per voxel (blockIdx.x = voxel id), threads in
// the block cooperate on that voxel. All working storage is carved out of
// dynamic shared memory (layout below); the caller must size the launch's
// shared-memory argument to match.
extern "C" __global__ void fit_PVM_single_c_kernel( //INPUT
const float* data,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_eval_BIC,
const bool m_include_f0,
const bool m_return_fanning,
const bool gradnonlin,
//INPUT - OUTPUT
float* params)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
// Doubles first (alignment), then floats, then ints.
extern __shared__ double shared[];
double* pcf = (double*) shared; //1
double* ncf = (double*) &pcf[1]; //1
double* lambda = (double*) &ncf[1]; //1
double* cftol = (double*) &lambda[1]; //1
double* ltol = (double*) &cftol[1]; //1
// Fix: the previous text `(double*) <ol[1]` was a mangled `&ltol[1]`
// (HTML-entity corruption of "&lt"); it did not compile and would have
// aliased olambda incorrectly.
double* olambda = (double*) &ltol[1]; //1
float* J = (float*)&olambda[1]; //threadsBlock*nparams
float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock
float* myparams = (float*) &reduction[threadsBlock]; //nparams
float* grad = (float*) &myparams[nparams]; //nparams
float* hess = (float*) &grad[nparams]; //nparams*nparams
float* step = (float*) &hess[nparams*nparams]; //nparams
float* inverse = (float*) &step[nparams]; //nparams
float* fs = (float*) &inverse[nparams]; //nfib
float* f_deriv = (float*) &fs[nfib]; //nfib*nfib
float* x = (float*) &f_deriv[nfib*nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
float* C = (float*)&sumf[1]; //nparams*nparams;
float* el = (float*)&C[nparams*nparams]; //nparams
int* indx = (int*)&el[nparams]; //nparams
int* success = (int*) &indx[nparams]; //1
int* end = (int*) &success[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
// Stage this voxel's current parameter estimates into shared memory.
if(idSubVOX<nparams){
myparams[idSubVOX]=params[(idVOX*nparams)+idSubVOX];
}
__syncthreads();
// With gradient nonlinearities each voxel has its own bvals/bvecs.
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
//do the fit
levenberg_marquardt_PVM_single_c_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections,nfib,nparams,m_include_f0,idSubVOX,step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J,reduction,fs,f_deriv,x,_d,sumf,C,el,indx,myparams);
__syncthreads();
// finalise parameters: map the fitted transform-space values back to
// model space (lambda -> d, beta -> f), then sort fibres by fraction.
// m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5, etc..] m_f0-myparams[nparams-1]
if(idSubVOX==0){
myparams[1] = lambda2d_gpu(myparams[1]);
for(int k=0;k<nfib;k++){
int kk = 2 + 3*(k);
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=myparams[2 + 3*j];
//////////////////////////
myparams[kk] = beta2f_gpu(myparams[kk])*partial_fsum;
}
if (m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++){
partial_fsum-=myparams[2 + 3*j];
}
//////////////////////////
myparams[nparams-1]= beta2f_gpu(myparams[nparams-1])*partial_fsum;
}
sort_PVM_single_c(nfib,myparams);
}
__syncthreads();
// Write the finalised parameters back to global memory.
if(idSubVOX<nparams){
params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX];
}
}
//in diffmodel.cc
// Compute per-direction residuals (measured - predicted) of the fitted
// PVM_single_c model. One block per voxel; thread 0 re-derives the
// transform-space parameters from the stored model-space values, then all
// threads evaluate their strided share of the directions.
extern "C" __global__ void get_residuals_PVM_single_c_kernel( //INPUT
const float* data,
const float* params,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const bool gradnonlin,
const bool* includes_f0,
//OUTPUT
float* residuals)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
float* myparams = (float*) shared; //nparams
float* fs = (float*) &myparams[nparams]; //nfib
float* x = (float*) &fs[nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
int* my_include_f0 = (int*) &sumf[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
float val;
float predicted_signal;
float mydata;
if(idSubVOX==0){
*my_include_f0 = includes_f0[idVOX];
//m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5 etc..] m_f0-myparams[nparams-1]
myparams[0]=params[(idVOX*nparams)+0];
// Fix: guard on the stored diffusivity itself. The previous code tested
// myparams[1], which had not been written yet (uninitialized shared
// memory); the else-branch already shows the intended operand.
if(params[(idVOX*nparams)+1]<0) myparams[1] = 0; //This can be due to numerical errors..sqrt
else myparams[1] = d2lambda_gpu(params[(idVOX*nparams)+1]);
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = params[(idVOX*nparams)+kk];
// Invert f -> beta, clamping the ratio into [0, 1] first.
float tmpr=fs[k]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[kk] = f2beta_gpu(tmpr);
myparams[kk+1] = params[(idVOX*nparams)+kk+1];
myparams[kk+2] = params[(idVOX*nparams)+kk+2];
}
if (*my_include_f0){
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float tmpr=params[(idVOX*nparams)+nparams-1]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors..asin
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[nparams-1]= f2beta_gpu(tmpr);
}
}
__syncthreads();
// Threads 0..nfib-1 convert each fibre's (th, ph) to a unit vector.
if(idSubVOX<nfib){
int kk = 2+3*idSubVOX;
float sinth,costh,sinph,cosph;
sincos(myparams[kk+1],&sinth,&costh);
sincos(myparams[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0 rebuilds the effective fractions and diffusivity.
if(idSubVOX==0){
float partial_fsum;
*sumf=0;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
////// partial_fsum //////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(myparams[kk])*partial_fsum;
*sumf += fs[k];
}
*_d = lambda2d_gpu(myparams[1]);
}
// Directions are strided across the block.
int ndir = ndirections/threadsBlock;
if(idSubVOX<(ndirections%threadsBlock)) ndir++;
float3 x2;
int dir_iter=idSubVOX;
// Barrier: shared myparams/fs/x/_d/sumf complete before any thread reads.
__syncthreads();
// With gradient nonlinearities each voxel has its own bvals/bvecs.
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
for(int dir=0;dir<ndir;dir++){
mydata = data[(idVOX*ndirections)+dir_iter];
predicted_signal=0; //pred = 0;
val = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
val += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections);
}
if (*my_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0= beta2f_gpu(myparams[nparams-1])*partial_fsum;
predicted_signal = myparams[0]*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}else{
predicted_signal = myparams[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}
//residuals=m_data-predicted_signal;
residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal;
dir_iter+=threadsBlock;
}
}
| 27f026f76d3041a2d3b06b7325a753295e57ffbe.cu | /* PVM_single_c.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
fsl@fmrib.ox.ac.uk
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 6.0 (c) 2018, The University of
Oxford (the "Software")
The Software remains the property of the Oxford University Innovation
("the University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
fsl@innovation.ox.ac.uk quoting Reference Project 9564, FSL.*/
#include "diffmodels_utils.h"
#include "levenberg_marquardt.cu"
#include "options.h"
#include <fstream>
/////////////////////////////////////
/////////////////////////////////////
/// PVM_single_c ///
/////////////////////////////////////
/////////////////////////////////////
// Isotropic compartment signal for data point pt: exp(-b * d).
// Note `-bvals[pt]**_d` parses as -bvals[pt] * (*_d).
__device__
inline float isoterm_PVM_single_c(const int pt,const float* _d,const float *bvals){
return exp(-bvals[pt]**_d);
}
// Derivative of the isotropic term w.r.t. lambda (d = lambda^2):
// d/dlambda exp(-b*lambda^2) = -2*b*lambda*exp(-b*lambda^2).
__device__
inline float isoterm_lambda_PVM_single_c(const int pt,const float lambda,const float *bvals){
return(-2*bvals[pt]*lambda*exp(-bvals[pt]*lambda*lambda));
}
// Anisotropic (stick) compartment signal: exp(-b * d * (g . x)^2), where
// g is gradient direction pt (row r stored at bvecs[r*ndirections + pt]).
__device__
inline float anisoterm_PVM_single_c(const int pt,const float* _d,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return exp(-bvals[pt]**_d*dp*dp);
}
// Derivative of the anisotropic term w.r.t. lambda (d = lambda^2):
// -2*b*lambda*dp^2 * exp(-b*lambda^2*dp^2), dp = g . x.
__device__
inline float anisoterm_lambda_PVM_single_c(const int pt,const float lambda,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return(-2*bvals[pt]*lambda*dp*dp*exp(-bvals[pt]*lambda*lambda*dp*dp));
}
// Derivative of the anisotropic term w.r.t. the polar angle _th, with
// x = (sin th cos ph, sin th sin ph, cos th): dp1 = g . dx/dth, and the
// chain rule gives -2*b*d*dp*dp1 * exp(-b*d*dp^2).
__device__
inline float anisoterm_th_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,costh,sinph,cosph;
sincos(_th,&sinth,&costh);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = costh*(bvecs[pt]*cosph+bvecs[ndirections+pt]*sinph)-bvecs[(2*ndirections)+pt]*sinth;
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
// Derivative of the anisotropic term w.r.t. the azimuthal angle _ph:
// dp1 = g . dx/dph (z component of x does not depend on ph), and the
// chain rule gives -2*b*d*dp*dp1 * exp(-b*d*dp^2).
__device__
inline float anisoterm_ph_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,sinph,cosph;
sinth=sin(_th);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = sinth*(-bvecs[pt]*sinph+bvecs[ndirections+pt]*cosph);
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
//If the sum of the fractions is >1, then zero as many fractions
//as necessary, so that the sum becomes smaller than 1.
//in diffmodel.cc
// If the running sum of fractions reaches 1 at index i, clamp fs[i..] to
// (almost) zero so the total stays below 1, then stop scanning.
__device__ void fix_fsum_PVM_single_c( //INPUT
int nfib,
//INPUT - OUTPUT){
float *fs)
{
float sumf=0.0;
for(int i=0;i<nfib;i++){
sumf+=fs[i];
if(sumf>=1){
for(int j=i;j<nfib;j++)
fs[j]=FSMALL_gpu; //make the fraction almost zero
break;
}
}
}
//in diffmodel.cc
// Bubble-sort the nfib (f, th, ph) parameter triples (starting at
// params[2]) into descending order of the fraction f (params[2 + 3*j]);
// the th/ph angles travel with their fraction.
__device__ void sort_PVM_single_c(int nfib,float* params)
{
float temp_f, temp_th, temp_ph;
// Order vector descending using f parameters as index
for(int i=1; i<(nfib); i++){
for(int j=0; j<(nfib-i); j++){
if (params[2+j*3] < params[2+(j+1)*3]){
// Swap the whole triple so angles stay with their fraction.
temp_f = params[2+j*3];
temp_th = params[2+j*3+1];
temp_ph = params[2+j*3+2];
params[2+j*3] = params[2+(j+1)*3];
params[2+j*3+1] = params[2+(j+1)*3+1];
params[2+j*3+2] = params[2+(j+1)*3+2];
params[2+(j+1)*3] = temp_f;
params[2+(j+1)*3+1] = temp_th;
params[2+(j+1)*3+2] = temp_ph;
}
}
}
}
// Derivatives of the constrained volume fractions fs[j] with respect to the
// beta parameters. Each of the first nfib threads (k = idSubVOX % nfib)
// fills column k of the nfib x nfib matrix Deriv (Deriv[j*nfib+k] =
// d fs[j] / d beta_k). Rows j > k are built from rows < j, so the j-loop
// order matters. Called with idSubVOX < nfib only.
__device__ void fractions_deriv_PVM_single_c( //INPUT
const float* params,
const float* fs,
const int nfib,
const int idSubVOX,
//OUTPUT
float* Deriv)
{
int nparams_per_fibre=3;
float fsum;
int k=idSubVOX%nfib;
// Clear this thread's column.
for (int j=0; j<nfib; j++){
Deriv[j*nfib+k]=0;
}
// kk indexes the beta parameter of fibre k (f, th, ph triplets start at 2).
int kk = 2+(k*nparams_per_fibre);
float sinparamkk = sin(2*params[kk]);
for (int j=0; j<nfib; j++){
int jj = 2+(j*nparams_per_fibre);
if (j==k){
// Diagonal: d f_j / d beta_j = sin(2*beta_j) * (1 - sum of earlier fs).
fsum=1;
for (int n=0; n<=(j-1); n++){
fsum-=fs[n];
}
Deriv[j*nfib+k]=sinparamkk*fsum;
}else if (j>k){
// Below diagonal: chain rule through the earlier rows of this column.
float sinparam = sin(params[jj]);
fsum=0;
for (int n=0; n<=(j-1); n++){
fsum+=Deriv[n*nfib+k];
}
Deriv[j*nfib+k]= -(sinparam*sinparam)*fsum;
}
}
}
//cost function PVM_single_c
// Sum-of-squared-residuals cost of the constrained PVM model for one voxel,
// computed cooperatively by one thread-block of THREADS_BLOCK_FIT threads
// (idSubVOX is the thread index). The shared buffers hold: per-thread
// partial sums (reduction), the fibre fractions (fs), the fibre orientation
// unit vectors (x, 3 floats per fibre), the diffusivity (*_d) and the total
// fraction (*sumf). Thread 0 accumulates the final value into *cfv.
__device__ void cf_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
double* cfv)
{
// Threads 0..nfib-1 convert each fibre's (th, ph) into a unit vector.
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0 derives the constrained fractions fs[k] from the beta params.
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*cfv = 0.0;
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
}
// Each thread handles every THREADS_BLOCK_FIT-th datapoint.
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
float err;
float3 x2;
int dir_iter=idSubVOX;
// Barrier: all threads must see the orientations/fractions written above.
__syncthreads();
reduction[idSubVOX]=0;
for(int dir=0;dir<ndir;dir++){
err = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
err += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,bvecs,bvals,ndirections);
}
// err becomes (predicted signal - measured signal) for this datapoint.
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
err= (params[0]*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,bvals))+err))-mdata[dir_iter];
}else{
err = params[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,bvals)+err)-mdata[dir_iter];
}
reduction[idSubVOX]+= err*err;
dir_iter+=THREADS_BLOCK_FIT;
}
// Serial reduction of the per-thread partial sums by thread 0.
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
*cfv+=reduction[i];
}
}
}
//gradient function PVM_single_c
// Gradient of the cost with respect to all nparams parameters, computed by
// one thread-block (idSubVOX = thread index). J is a shared scratch buffer
// holding one Jacobian row per thread (threadsBlock*nparams floats); the
// anisotropic terms are temporarily cached in myJ[2+3k+1] before those slots
// are overwritten with the theta-derivatives. All threads iterate max_dir
// times so the __syncthreads() in the reduction loop is never divergent.
__device__ void grad_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* grad)
{
// Threads 0..nfib-1: fibre orientation unit vectors from (th, ph).
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0: constrained fractions, diffusivity, and gradient reset.
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++) grad[p]=0;
}
__syncthreads();
// Threads 0..nfib-1 fill the fraction-derivative matrix f_deriv.
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
// ndir: datapoints handled by THIS thread; max_dir: loop count for ALL
// threads (kept uniform so the barriers below are safe).
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
// NOTE(review): diff is only assigned when dir<ndir; in tail iterations the
// products 2*myJ[p]*diff rely on myJ[p]==0 — verify diff cannot be NaN.
float diff;
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
// Cache the anisotropic terms in the (soon-to-be-theta) J slots.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
// Accumulate the signal and the lambda/fraction derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
myJ[kk] = 0;
for (int j=0;j<nfib;j++){
if(f_deriv[j*nfib+k]!=0){
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+j*3+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
}
// Now overwrite the cached terms with the theta/phi derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig=params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
diff = sig - mdata[dir_iter];
myJ[0] = sig/params[0];
}
// grad[p] += sum over threads of 2*J[p]*diff (serial reduce by thread 0).
for (int p=0;p<nparams;p++){
reduction[idSubVOX]=2*myJ[p]*diff;
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
grad[p] += reduction[i];
}
}
__syncthreads();
}
dir_iter+=THREADS_BLOCK_FIT;
}
}
//hessian function PVM_single_c
// Gauss-Newton approximation of the Hessian (2 * J^T J) computed by one
// thread-block. Structure mirrors grad_PVM_single_c: each thread builds its
// Jacobian row myJ for its datapoints, then the upper triangle of hess is
// reduced serially by thread 0 and mirrored to the lower triangle at the end.
// All threads run max_dir iterations so the barriers are never divergent.
__device__ void hess_PVM_single_c( //INPUT
const float* params,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* hess)
{
// Threads 0..nfib-1: fibre orientation unit vectors from (th, ph).
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0: constrained fractions, diffusivity, and Hessian reset.
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++){
for (int p2=0;p2<nparams;p2++){
hess[p*nparams+p2] = 0;
}
}
}
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
// ndir: datapoints for THIS thread; max_dir: uniform loop count for all.
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
// Cache the anisotropic terms in the (soon-to-be-theta) J slots.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
for (int j=0; j<nfib; j++){
if (f_deriv[j*nfib+k]!=0)
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+3*j+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
// Overwrite the cached terms with the theta/phi derivatives.
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig= params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
myJ[0] = sig/params[0];
}
// Upper-triangle reduction: hess[p][p2] += sum over threads of 2*J[p]*J[p2].
for (int p=0;p<nparams;p++){
for (int p2=p;p2<nparams;p2++){
reduction[idSubVOX]=2*(myJ[p]*myJ[p2]);
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
hess[p*nparams+p2] += reduction[i];
}
}
__syncthreads();
}
}
dir_iter+=THREADS_BLOCK_FIT;
}
// Mirror the upper triangle into the lower triangle (hess is symmetric).
if(idSubVOX==0){
for (int j=0; j<nparams; j++) {
for (int i=j+1; i<nparams; i++) {
hess[i*nparams+j]=hess[j*nparams+i];
}
}
}
}
//in diffmodel.cc
// One thread-block per voxel: Levenberg-Marquardt fit of the constrained PVM
// single-shell model, then conversion of the fitted parameters back to their
// natural representation (lambda -> d, beta -> f) and descending sort by f.
// Dynamic shared memory is carved up manually below; the per-pointer comments
// give each region's length.
// FIX(review): the olambda pointer previously read "(double*) <ol[1];" — a
// corrupted "&ltol[1]" (HTML-entity mangling) that does not compile.
extern "C" __global__ void fit_PVM_single_c_kernel( //INPUT
const float* data,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_eval_BIC,
const bool m_include_f0,
const bool m_return_fanning,
const bool gradnonlin,
//INPUT - OUTPUT
float* params)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
double* pcf = (double*) shared; //1
double* ncf = (double*) &pcf[1]; //1
double* lambda = (double*) &ncf[1]; //1
double* cftol = (double*) &lambda[1]; //1
double* ltol = (double*) &cftol[1]; //1
double* olambda = (double*) &ltol[1]; //1
float* J = (float*)&olambda[1]; //threadsBlock*nparams
float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock
float* myparams = (float*) &reduction[threadsBlock]; //nparams
float* grad = (float*) &myparams[nparams]; //nparams
float* hess = (float*) &grad[nparams]; //nparams*nparams
float* step = (float*) &hess[nparams*nparams]; //nparams
float* inverse = (float*) &step[nparams]; //nparams
float* fs = (float*) &inverse[nparams]; //nfib
float* f_deriv = (float*) &fs[nfib]; //nfib*nfib
float* x = (float*) &f_deriv[nfib*nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
float* C = (float*)&sumf[1]; //nparams*nparams;
float* el = (float*)&C[nparams*nparams]; //nparams
int* indx = (int*)&el[nparams]; //nparams
int* success = (int*) &indx[nparams]; //1
int* end = (int*) &success[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
// Load this voxel's parameters into shared memory (one thread per param).
if(idSubVOX<nparams){
myparams[idSubVOX]=params[(idVOX*nparams)+idSubVOX];
}
__syncthreads();
// With gradient nonlinearities each voxel has its own bvals/bvecs.
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
//do the fit
levenberg_marquardt_PVM_single_c_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections,nfib,nparams,m_include_f0,idSubVOX,step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J,reduction,fs,f_deriv,x,_d,sumf,C,el,indx,myparams);
__syncthreads();
// finalise parameters
// m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5, etc..] m_f0-myparams[nparams-1]
if(idSubVOX==0){
myparams[1] = lambda2d_gpu(myparams[1]);
for(int k=0;k<nfib;k++){
int kk = 2 + 3*(k);
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=myparams[2 + 3*j];
//////////////////////////
myparams[kk] = beta2f_gpu(myparams[kk])*partial_fsum;
}
if (m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++){
partial_fsum-=myparams[2 + 3*j];
}
//////////////////////////
myparams[nparams-1]= beta2f_gpu(myparams[nparams-1])*partial_fsum;
}
sort_PVM_single_c(nfib,myparams);
}
__syncthreads();
// Write the finalised parameters back to global memory.
if(idSubVOX<nparams){
params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX];
}
}
//in diffmodel.cc
// One thread-block per voxel: recompute the model prediction from fitted
// parameters and write residuals = data - prediction. The natural-space
// params are first mapped back to the fit parametrisation (d -> lambda,
// f -> beta) by thread 0, then all threads evaluate their datapoints.
// FIX(review): the negative-diffusivity guard used to read myparams[1]
// BEFORE it was ever written (uninitialised shared memory); it must test the
// input value params[(idVOX*nparams)+1] instead.
extern "C" __global__ void get_residuals_PVM_single_c_kernel( //INPUT
const float* data,
const float* params,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const bool gradnonlin,
const bool* includes_f0,
//OUTPUT
float* residuals)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
float* myparams = (float*) shared; //nparams
float* fs = (float*) &myparams[nparams]; //nfib
float* x = (float*) &fs[nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
int* my_include_f0 = (int*) &sumf[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
float val;
float predicted_signal;
float mydata;
if(idSubVOX==0){
*my_include_f0 = includes_f0[idVOX];
//m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5 etc..] m_f0-myparams[nparams-1]
myparams[0]=params[(idVOX*nparams)+0];
// Guard on the INPUT d value (was: myparams[1], read uninitialised).
if(params[(idVOX*nparams)+1]<0) myparams[1] = 0; //This can be due to numerical errors..sqrt
else myparams[1] = d2lambda_gpu(params[(idVOX*nparams)+1]);
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = params[(idVOX*nparams)+kk];
// Invert the f -> beta constraint, clamping numerical noise to [0,1].
float tmpr=fs[k]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[kk] = f2beta_gpu(tmpr);
myparams[kk+1] = params[(idVOX*nparams)+kk+1];
myparams[kk+2] = params[(idVOX*nparams)+kk+2];
}
if (*my_include_f0){
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float tmpr=params[(idVOX*nparams)+nparams-1]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors..asin
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[nparams-1]= f2beta_gpu(tmpr);
}
}
__syncthreads();
// Threads 0..nfib-1: fibre orientation unit vectors from (th, ph).
if(idSubVOX<nfib){
int kk = 2+3*idSubVOX;
float sinth,costh,sinph,cosph;
sincos(myparams[kk+1],&sinth,&costh);
sincos(myparams[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
// Thread 0: constrained fractions and diffusivity.
if(idSubVOX==0){
float partial_fsum;
*sumf=0;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
////// partial_fsum //////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(myparams[kk])*partial_fsum;
*sumf += fs[k];
}
*_d = lambda2d_gpu(myparams[1]);
}
// Each thread evaluates every threadsBlock-th datapoint.
int ndir = ndirections/threadsBlock;
if(idSubVOX<(ndirections%threadsBlock)) ndir++;
float3 x2;
int dir_iter=idSubVOX;
__syncthreads();
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
for(int dir=0;dir<ndir;dir++){
mydata = data[(idVOX*ndirections)+dir_iter];
predicted_signal=0; //pred = 0;
val = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
val += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections);
}
if (*my_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0= beta2f_gpu(myparams[nparams-1])*partial_fsum;
predicted_signal = myparams[0]*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}else{
predicted_signal = myparams[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}
//residuals=m_data-predicted_signal;
residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal;
dir_iter+=threadsBlock;
}
}
|
9c197cf8e8f40ca70d31bc88a0b475eabfaeab16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
// One thread-block per grid cell: locate this block's cell via its 3D block
// index and zero its current accumulators using the block's threads.
void GPU_SetAllCurrentsToZero(GPUCell **cells)
{
	unsigned int bx = blockIdx.x;
	unsigned int by = blockIdx.y;
	unsigned int bz = blockIdx.z;
	Cell *base = cells[0];
	Cell local;
	Cell *cell = cells[base->getGlobalCellNumber(bx, by, bz)];
	// Operate on a by-value copy of the cell, exactly as the original code
	// did (presumably the copy shares the current arrays via pointers —
	// TODO(review): confirm against Cell's copy semantics).
	local = *cell;
	local.SetAllCurrentsToZero(threadIdx);
}
__global__
// One thread-block per grid cell: load the global E and H field arrays into
// this cell's local field storage.
void GPU_SetFieldsToCells(GPUCell **cells,
double *Ex,double *Ey,double *Ez,
double *Hx,double *Hy,double *Hz
)
{
	unsigned int bx = blockIdx.x;
	unsigned int by = blockIdx.y;
	unsigned int bz = blockIdx.z;
	Cell *base = cells[0];
	Cell *cell = cells[base->getGlobalCellNumber(bx, by, bz)];
	cell->readFieldsFromArrays(Ex, Ey, Ez, Hx, Hy, Hz, threadIdx);
}
__host__ __device__
// Compare two arrays element-wise against SIZE_TOLERANCE and return the
// fraction of matching elements (1.0 means identical within tolerance).
// FIX(review): `wrong` used to be incremented only inside the
// CHECK_ARRAY_SIZE_DEBUG_PRINTS printf, so builds without that define
// always returned 1.0; mismatches are now counted unconditionally.
double CheckArraySize(double* a, double* dbg_a,int size)
{
	int wrong = 0;
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
	printf("begin array checking1=============================\n");
#endif
	for(int n = 0;n < size;n++)
	{
		if(fabs(a[n] - dbg_a[n]) > SIZE_TOLERANCE)
		{
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
			printf("n %5d %15.5e dbg %15.5e diff %15.5e wrong %10d \n",
			       n,a[n],dbg_a[n],fabs(a[n] - dbg_a[n]),wrong);
#endif
			wrong++; // count the mismatch with or without debug prints
		}
	}
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
	printf(" end array checking=============================\n");
#endif
	return (1.0-((double)wrong/(size)));
}
// One thread-block per cell, one thread per node of the cell's local 3D
// stencil: atomically accumulate the cell's local Jx/Jy/Jz contributions
// into the global (Fortran-ordered) current arrays.
// NOTE(review): the `rho` parameter and the local `i3` are unused here.
__global__ void GPU_WriteAllCurrents(GPUCell **cells,int n0,
double *jx,double *jy,double *jz,double *rho)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
Cell *c,*c0 = cells[0];
// __shared__ extern CellDouble fd[9];
c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
// Local node indices within this cell's stencil.
int i1,l1,k1;
i1 = threadIdx.x;
l1 = threadIdx.y;
k1 = threadIdx.z;
// Global (Fortran) index of the node; negated indices are folded positive —
// NOTE(review): confirm this sign convention against getFortranCellNumber.
int n = c->getFortranCellNumber(c->i+i1-1,c->l+l1-1,c->k+k1-1);
if (n < 0 ) n = -n;
double t,t_x,t_y;
t_x = c->Jx->M[i1][l1][k1];
int3 i3 = c->getCellTripletNumber(n);
// Atomic adds: many cells contribute to the same global node.
cuda_atomicAdd(&(jx[n]),t_x);
t_y= c->Jy->M[i1][l1][k1];
cuda_atomicAdd(&(jy[n]),t_y);
t = c->Jz->M[i1][l1][k1];
cuda_atomicAdd(&(jz[n]),t);
}
// One thread-block per cell: attach the control-system data to the cell's
// particles.
__global__ void GPU_WriteControlSystem(Cell **cells)
{
	unsigned int bx = blockIdx.x;
	unsigned int by = blockIdx.y;
	unsigned int bz = blockIdx.z;
	Cell *base = cells[0];
	Cell local;
	Cell *cell = cells[base->getGlobalCellNumber(bx, by, bz)];
	// Work through a by-value copy of the cell, as the original code did.
	local = *cell;
	local.SetControlSystemToParticles();
}
//TODO : 1. 3 separate kernels :
// A. form 3x3x3 array with number how many to fly and departure list with start positions in 3x3x3 array
// B. func to get 3x3x3 indexes from a pair of cell numbers, to and from function
// C. 2nd kernel to write arrival 3x3x3 matrices
/// D. 3rd kernel to form arrival positions in the particle list
// E. 4th to write arriving particles
// One thread-block per cell (effectively single-threaded per cell): scan the
// cell's particles and sort those that left the cell into per-direction
// departure lists (3x3x3 neighbour directions). On overflow, reports
// TOO_MANY_PARTICLES through d_stage and aborts this cell.
// NOTE(review): d_stage[1..3] are written twice below (cell coords, then
// immediately overwritten by ix,iy,iz) — the second triple was presumably
// meant for d_stage[4..6]; confirm against the host-side error decoding.
// NOTE(review): writeParticleToSurface is called both before and inside the
// isPointInCell branch — the second call looks redundant; verify.
__global__ void GPU_MakeDepartureLists(GPUCell **cells,int nt,int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz;//,n;
Particle p;
Cell *c,*c0 = cells[0],nc;//,*new_c;
c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
c->departureListLength = 0;
// Reset the 3x3x3 per-direction departure counters.
for(ix = 0;ix < 3;ix++)
{
for(iy = 0;iy < 3;iy++)
{
for(iz = 0;iz < 3;iz++)
{
c->departure[ix][iy][iz] = 0;
// c->departureIndex[ix][iy][iz] = 0;
}
}
}
c->departureListLength = 0;
for(int num = 0;num < c->number_of_particles; num++)
{
p = c->readParticleFromSurfaceDevice(num);
// Classify the particle's flight direction and store it back (flyDirection
// also updates p.direction).
c->flyDirection(&p,&ix,&iy,&iz);
c->writeParticleToSurface(num,&p);
if(!c->isPointInCell(p.GetX())) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
{
// c->flyDirection(&p,&ix,&iy,&iz);
// printf("fly %d:(%d,%d,%d) %d \n",p.direction,ix,iy,iz,ix*9 +iy*3 +iz);
c->writeParticleToSurface(num,&p);
// if(p.direction == 0) printf("Blin-hren'\n");
// f(p.direction != (ix | (iy << 2) |(iz << 4))) printf("Blin-hren'^2\n");
// Overflow: report the offending cell/direction and give up.
if(c->departureListLength == PARTICLES_FLYING_ONE_DIRECTION)
{
d_stage[0] = TOO_MANY_PARTICLES;
d_stage[1] = c->i;
d_stage[2] = c->l;
d_stage[3] = c->k;
d_stage[1] = ix;
d_stage[2] = iy;
d_stage[3] = iz;
return;
}
c->departureListLength++;
int num1 = c->departure[ix][iy][iz];
c->departureList[ix][iy][iz][num1] = p;
c->departure[ix][iy][iz] += 1;
}
else
{
// printf("home %d:(%d,%d,%d) %d \n",p.direction,ix,iy,iz,ix*9 +iy*3 +iz);
}
}
}
// One thread-block per cell: remove every particle whose direction flag is
// not 13 (13 = "stays in this cell"). Removal compacts the particle list,
// so num is decremented to re-examine the particle swapped into this slot.
// NOTE(review): ix,iy,iz and nc are declared but unused here.
__global__ void GPU_MakeDepartureLists_rm(GPUCell **cells,int nt,int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz;//,n;
Particle p;
Cell *c,*c0 = cells[0],nc;//,*new_c;
c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
for(int num = 0;num < c->number_of_particles; num++)
{
p = c->readParticleFromSurfaceDevice(num);
if(p.direction != 13) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
{
c->removeParticleFromSurfaceDevice(num,&p,&(c->number_of_particles));
// Re-check the particle that was compacted into slot num.
num--;
}
}
}
// One thread-block per cell: delete every particle whose position is no
// longer inside this cell (it has already been copied to its destination by
// the departure/arrival kernels). nt and d_stage are kept for interface
// compatibility; they are unused here.
// FIX(review): after removeParticleFromSurfaceDevice compacts the list the
// particle moved into slot `num` was skipped; `num--` now re-examines it,
// matching GPU_MakeDepartureLists_rm and GPU_CollectStrayParticles.
__global__ void GPU_RemoveDepartureParticles(GPUCell **cells,int nt,int *d_stage)
{
	unsigned int nx = blockIdx.x;
	unsigned int ny = blockIdx.y;
	unsigned int nz = blockIdx.z;
	Particle p;
	Cell *c,*c0 = cells[0];
	c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
	for(int num = 0;num < c->number_of_particles; num++)
	{
		p = c->readParticleFromSurfaceDevice(num);
		if(!c->isPointInCell(p.GetX())) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
		{
			c->removeParticleFromSurfaceDevice(num,&p,&(c->number_of_particles));
			// Re-check the particle that was compacted into slot num.
			num--;
		}
	}
}
// Fuzzy scalar comparison: 1 when |a - b| < TOLERANCE, otherwise 0.
__device__ int d_comd(double a,double b)
{
	return (fabs(a - b) < TOLERANCE) ? 1 : 0;
}
// Fuzzy comparison of two particles by position (x, y, z) and momentum
// (pu, pv, pw): returns 1 only when every component matches within
// TOLERANCE (via d_comd). Removed the unused debug local `dpx`.
__device__ int d_compare(Particle p,Particle p1)
{
	int tx  = d_comd(p.x,p1.x);
	int ty  = d_comd(p.y,p1.y);
	int tz  = d_comd(p.z,p1.z);
	int tpx = d_comd(p.pu,p1.pu);
	int tpy = d_comd(p.pv,p1.pv);
	int tpz = d_comd(p.pw,p1.pw);
	return (tx && ty && tz && tpx && tpy && tpz);
}
// One thread-block per cell: gather arriving particles. For each of the 27
// neighbouring cells (wrap-around indexing), every particle whose direction
// flag is not 13 is marked as settled (direction = 13) and inserted into
// this cell. The original departure-list based transfer is left commented
// out below.
// NOTE(review): this mutates p1.direction on a local copy only — verify that
// Insert stores the updated copy and that the source cell's particle is
// removed elsewhere (GPU_MakeDepartureLists_rm).
__global__ void GPU_ArrangeFlights(GPUCell **cells,int nt, int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz,snd_ix,snd_iy,snd_iz,num,n;
Particle p,p1;
Cell *c,*c0 = cells[0],nc,*snd_c;
c = cells[ n = c0->getGlobalCellNumber(nx,ny,nz)];
// Visit all 27 neighbours (including self) with periodic wrapping.
for(ix = 0;ix < 3;ix++)
for(iy = 0;iy < 3;iy++)
for(iz = 0;iz < 3;iz++)
{
n = c0->getWrapCellNumber(nx+ix-1,ny+iy-1,nz+iz-1);
snd_c = cells[ n ];
for(int j = 0;j < snd_c->number_of_particles;j++)
{
p1 = snd_c->readParticleFromSurfaceDevice(j);
// snd_ix = ix;
// snd_iy = iy;
// snd_iz = iz;
//
// c->inverseDirection(&snd_ix,&snd_iy,&snd_iz);
//
// num = snd_c->departure[snd_ix][snd_iy][snd_iz];
//
//
//
// for(int i = 0;i < num;i++)
// {
// p = snd_c->departureList[snd_ix][snd_iy][snd_iz][i];
//
//
//
if(p1.direction != 13)
{
p1.direction = 13;
c->Insert(p1);
}
//
// }
}
}
}
// One thread-block per cell: move "stray" particles (whose position fell
// outside their owning cell after the push) into the cell that now contains
// them. Both the source and destination particle lists are protected with an
// atomicCAS spin-lock on busyParticleArray, released with atomicExch.
// NOTE(review): a spin-lock across thread-blocks can livelock if blocks
// contend heavily; confirm launch configuration keeps contention low.
__global__ void GPU_CollectStrayParticles(Cell **cells,int nt
// int n,
// int i,
// double mass,
// double q_mass,
// double *p_control,
// int jmp
)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
// int busy;
Particle p;
int n;
// int i,l,k;
Cell *c,*c0 = cells[0],nc,*new_c;
//int first = 1;
c = cells[ n = c0->getGlobalCellNumber(nx,ny,nz)];
for(int i = 0;i < c->number_of_particles; i++)
{
p = c->readParticleFromSurfaceDevice(i);
#ifdef STRAY_DEBUG_PRINTS
//if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
// {
printf("STRAY-BASIC step %d cell %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %15.5e < %15.5e Y: %15.5e < %15.5e < %15.5e Z: %15.5e < %15.5e < %15.5e \n",
nt,c->i,c->l,c->k,(int)p.sort,i,p.fortran_number,
c->x0,p.x,c->x0+c->hx,
c->y0,p.y,c->y0+c->hy,
c->z0,p.z,c->z0+c->hz
);
//}
#endif
if(!c->isPointInCell(p.GetX()))// || (p.fortran_number == 753) )//|| (p.fortran_number == 10572))
{
#ifdef STRAY_DEBUG_PRINTS
printf("STRAY-OUT step %3d cell %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %25.17e < %15.5e \n",
nt,c->i,c->l,c->k,(int)p.sort,i,p.fortran_number,c->x0,p.x,c->x0+c->hx);
#endif
// Find the cell that now owns this particle's position.
int new_n = c->getPointCell(p.GetX());
new_c = cells[new_n];
if(c->i == 99 && c->l == 0 && c->k == 3)
{
// c->printCellParticles();
// if(c->i >= c->Nx-1)
// {
#ifdef STRAY_DEBUG_PRINTS
// if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
// {
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d X: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->x0,p.x,c->x0+c->hx);
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d Y: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->y0,p.y,c->y0+c->hy);
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d Z: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->z0,p.z,c->z0+c->hz);
// }
#endif
// }
}
// if(first == 1)
// {
// do{
// busy = atomicCAS(&(c->busyParticleArray),0,1);
// busy = ((c->busyParticleArra == 0 )? 1: c->busyParticleArra)
// busy = ((c->busyParticleArra == 1 )? 0: c->busyParticleArra)
// }while(busy == 1);
// Lock the source cell, remove the particle (compacting the list),
// then step back one slot so the swapped-in particle is re-checked.
while (atomicCAS(&(c->busyParticleArray),0,1)) {}
c->removeParticleFromSurfaceDevice(i,&p,&(c->number_of_particles));
//c->busyParticleArray = 0;
atomicExch(&(c->busyParticleArray),0u);
i--;
// first = 0;
// }
// if(c->i == 99 && c->l == 0 && c->k == 3)
// {
// c->printCellParticles();
// }
//do{
// busy = atomicCAS(&(new_c->busyParticleArray),0,1);
//}while(busy == 1);
// Lock the destination cell and insert the particle there.
while (atomicCAS(&(new_c->busyParticleArray),0,1)) {}
new_c->Insert(p);
#ifdef STRAY_DEBUG_PRINTS
printf("STRAY-INSERT step %d %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %25.17e < %15.5e \n",
nt,
new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,new_c->x0,p.x,new_c->x0+new_c->hx);
#endif
//new_c->busyParticleArray = 0;
atomicExch(&(new_c->busyParticleArray),0u);
if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
{
// new_c->printCellParticles();
}
if(c->i == 99 && c->l == 0 && c->k == 3)
{
// new_c->printCellParticles();
}
}
}
// c->printCellParticles("STRAY-FINAL",nt);
}
// Debug helper: return the contribution that the current tensor(s) t1 (and
// t2, when pqr2 == 2) would deposit at grid node (i, l, k). Each tensor
// carries four (index-triplet, value) entries; the last matching entry with
// |value| > 1e-15 wins. Returns 0 when no entry targets the node.
__device__ double checkCurrentComponentImpact(
CurrentTensorComponent *t1,CurrentTensorComponent *t2,
int i,int l, int k,int pqr2
)
{
double res = 0;
if((t1->i11 == i && t1->i12 == l && t1->i13 == k && (fabs(t1->t[0]) > 1e-15))) res = t1->t[0];
if((t1->i21 == i && t1->i22 == l && t1->i23 == k && (fabs(t1->t[1]) > 1e-15))) res = t1->t[1];
if((t1->i31 == i && t1->i32 == l && t1->i33 == k && (fabs(t1->t[2]) > 1e-15))) res = t1->t[2];
if((t1->i41 == i && t1->i42 == l && t1->i43 == k && (fabs(t1->t[3]) > 1e-15))) res = t1->t[3];
// Second tensor only participates when pqr2 == 2.
if(pqr2 == 2)
{
if((t2->i11 == i && t2->i12 == l && t2->i13 == k && (fabs(t2->t[0]) > 1e-15))) res = t2->t[0];
if((t2->i21 == i && t2->i22 == l && t2->i23 == k && (fabs(t2->t[1]) > 1e-15))) res = t2->t[1];
if((t2->i31 == i && t2->i32 == l && t2->i33 == k && (fabs(t2->t[2]) > 1e-15))) res = t2->t[2];
if((t2->i41 == i && t2->i42 == l && t2->i43 == k && (fabs(t2->t[3]) > 1e-15))) res = t2->t[3];
}
return res;
}
// Atomically deposit the four (index-triplet, value) entries of current
// tensor t1 — and of t2 when pqr2 == 2 — into the cell-local current grid J.
// Atomic adds are required because multiple particles (threads) target the
// same grid nodes concurrently.
__device__ void writeCurrentComponent(CellDouble *J,
CurrentTensorComponent *t1,CurrentTensorComponent *t2,int pqr2)
{
cuda_atomicAdd(&(J->M[t1->i11][t1->i12][t1->i13]),t1->t[0]);
cuda_atomicAdd(&(J->M[t1->i21][t1->i22][t1->i23]),t1->t[1]);
cuda_atomicAdd(&(J->M[t1->i31][t1->i32][t1->i33]),t1->t[2]);
cuda_atomicAdd(&(J->M[t1->i41][t1->i42][t1->i43]),t1->t[3]);
if(pqr2 == 2)
{
cuda_atomicAdd(&(J->M[t2->i11][t2->i12][t2->i13]),t2->t[0]);
cuda_atomicAdd(&(J->M[t2->i21][t2->i22][t2->i23]),t2->t[1]);
cuda_atomicAdd(&(J->M[t2->i31][t2->i32][t2->i33]),t2->t[2]);
cuda_atomicAdd(&(J->M[t2->i41][t2->i42][t2->i43]),t2->t[3]);
}
}
// Copy one element (flat index n over the CellExtent^3 grid) of src into
// dst. Out-of-range indices are ignored so callers may stride by block
// size. `block` is unused (kept for the old debug prints / interface).
__device__ void copyCellDouble(CellDouble *dst,CellDouble *src,unsigned int n,uint3 block)
{
	if (n >= CellExtent*CellExtent*CellExtent) return;
	double *to   = (double *)(dst->M);
	double *from = (double *)(src->M);
	to[n] = from[n];
}
// Accumulate element n of each of the `dim` CellDoubles in the array `src`
// into element n of dst. Out-of-range indices are ignored so callers may
// stride by block size. `block` is unused (kept for interface).
__device__ void addCellDouble(CellDouble *dst,CellDouble *src,unsigned int n,uint3 block,int dim)
{
	if (n >= CellExtent*CellExtent*CellExtent) return;
	double *acc = (double *)(dst->M);
	for (int i = 0; i < dim; i++)
	{
		double *part = (double *)(src[i].M);
		acc[n] += part[n];
	}
}
// Zero element n (flat index over the CellExtent^3 grid) of dst.
// Out-of-range indices are ignored so callers may stride by block size.
__device__ void setCellDoubleToZero(CellDouble *dst,unsigned int n)
{
	if (n >= CellExtent*CellExtent*CellExtent) return;
	((double *)(dst->M))[n] = 0.0;
}
// One thread-block per cell: report each cell's particle count into the
// `numbers` array (indexed by blockIdx.x).
__global__ void GPU_GetCellNumbers(Cell **cells,
int *numbers)
{
	Cell *cell = cells[blockIdx.x];
	numbers[blockIdx.x] = cell->number_of_particles;
}
// Map the nine per-cell field buffers onto consecutive slots of the shared
// scratch array fd: E in slots 0-2, H in slots 3-5, J in slots 6-8.
__device__ void assignSharedWithLocal(
CellDouble **c_jx,
CellDouble **c_jy,
CellDouble **c_jz,
CellDouble **c_ex,
CellDouble **c_ey,
CellDouble **c_ez,
CellDouble **c_hx,
CellDouble **c_hy,
CellDouble **c_hz,
CellDouble *fd)
{
	CellDouble **slot[9] = { c_ex, c_ey, c_ez, c_hx, c_hy, c_hz, c_jx, c_jy, c_jz };
	for (int i = 0; i < 9; i++)
	{
		*(slot[i]) = &(fd[i]);
	}
}
// Cooperatively copy all nine field grids of cell c into the shared-memory
// buffers: each thread starts at its own index and strides by the block
// width until the CellExtent^3 elements are covered, then the whole block
// synchronises.
__device__ void copyFieldsToSharedMemory(
CellDouble *c_jx,
CellDouble *c_jy,
CellDouble *c_jz,
CellDouble *c_ex,
CellDouble *c_ey,
CellDouble *c_ez,
CellDouble *c_hx,
CellDouble *c_hy,
CellDouble *c_hz,
Cell *c,
int index,
dim3 blockId,
int blockDimX
)
{
	for (; index < CellExtent*CellExtent*CellExtent; index += blockDimX)
	{
		copyCellDouble(c_ex,c->Ex,index,blockId);
		copyCellDouble(c_ey,c->Ey,index,blockId);
		copyCellDouble(c_ez,c->Ez,index,blockId);
		copyCellDouble(c_hx,c->Hx,index,blockId);
		copyCellDouble(c_hy,c->Hy,index,blockId);
		copyCellDouble(c_hz,c->Hz,index,blockId);
		copyCellDouble(c_jx,c->Jx,index,blockId);
		copyCellDouble(c_jy,c->Jy,index,blockId);
		copyCellDouble(c_jz,c->Jz,index,blockId);
	}
	// Barrier is outside the loop, so every thread reaches it.
	__syncthreads();
}
// Zero the per-block current accumulation buffers (Jx/Jy/Jz partials).
// `index` is the calling thread's flat id and selects which element of each
// CellDouble this thread clears (setCellDoubleToZero bounds-checks it).
// NOTE(review): full coverage of all CellExtent^3 elements requires
// blockDimX >= CellExtent^3 - confirm launch configs satisfy this.
// Fixed: removed a second while-loop whose body was entirely commented out;
// it performed no work.
__device__ void set_cell_double_arrays_to_zero(
CellDouble *m_c_jx,
CellDouble *m_c_jy,
CellDouble *m_c_jz,
int size,
int index,
int blockDimX
)
{
    for(int i = 0;i < size;i++)
    {
        setCellDoubleToZero(&(m_c_jx[i]),index);
        setCellDoubleToZero(&(m_c_jy[i]),index);
        setCellDoubleToZero(&(m_c_jz[i]),index);
    }
    // All threads must finish zeroing before the buffers are used.
    __syncthreads();
}
// Zero the first tensor slot (t[0]) of each element of `ca`.
// Fixed: the loop ran a hard-coded `i <= 100` (101 iterations) regardless of
// the array's real size, overrunning shorter arrays; it now honours `length`.
// NOTE(review): only t[0] is cleared, as in the original - confirm t[1..3]
// are always written before being read.
__device__ void set_cell_double_array_to_zero(CurrentTensorComponent *ca,int length)
{
    for(int i = 0; i < length; i++)
    {
        ca[i].t[0] = 0.0;
    }
}
// Advance every particle of cell `c` by one step. Thread `index` handles
// particles index, index+blockDimX, index+2*blockDimX, ...
// NOTE(review): the shared-memory field copies (c_ex..c_hz) are accepted but
// the mover builds `cf` from the cell's own global-memory field pointers -
// confirm whether the shared copies were meant to be used here.
// Fixed: removed the unused local `pqr2` and dead commented-out parameters.
__device__ void MoveParticlesInCell(
CellDouble *c_ex,
CellDouble *c_ey,
CellDouble *c_ez,
CellDouble *c_hx,
CellDouble *c_hy,
CellDouble *c_hz,
Cell *c,
int index,
int blockDimX
)
{
    CellTotalField cf;
    while(index < c->number_of_particles)
    {
        cf.Ex = c->Ex;
        cf.Ey = c->Ey;
        cf.Ez = c->Ez;
        cf.Hx = c->Hx;
        cf.Hy = c->Hy;
        cf.Hz = c->Hz;
        c->MoveSingleParticle(index,cf);
        index += blockDimX;
    }
    // Barrier after all threads finish their particle subsets.
    __syncthreads();
}
// Copy matrix b into matrix a element by element (a = b).
// M is a contiguous CellExtent^3 block, so a flat loop is equivalent to the
// triple-nested form.
__device__ void assign_cell_double(CellDouble *a,CellDouble *b)
{
    double *dst = (double *)(a->M);
    double *src = (double *)(b->M);
    for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
    {
        dst[n] = src[n];
    }
}
// Accumulate matrix b into matrix a element by element (a += b).
// M is a contiguous CellExtent^3 block, so a flat loop is equivalent to the
// triple-nested form.
__device__ void add_cell_double(CellDouble *a,CellDouble *b)
{
    double *dst = (double *)(a->M);
    double *src = (double *)(b->M);
    for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
    {
        dst[n] += src[n];
    }
}
// Zero every element of each of the `size` CellDouble matrices in `arr`.
__device__ void set_cell_double_array_to_zero(CellDouble *arr,int size)
{
    for(int num = 0;num < size;num++)
    {
        // Each M is a contiguous CellExtent^3 block of doubles.
        double *flat = (double *)(arr[num].M);
        for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
        {
            flat[n] = 0.0;
        }
    }
}
// Accumulate the current deposited by every particle of cell `c` into the
// shared partial buffers c_jx/c_jy/c_jz. The slot `index %
// CellDouble_array_dim` spreads the atomic adds over several buffers to
// reduce contention. `nt` (time step) is kept for interface compatibility.
// Fixed: removed unused locals (t1, t2, dt1) and a stray double semicolon.
__device__ void AccumulateCurrentWithParticlesInCell(
CellDouble *c_jx,
int CellDouble_array_dim,
CellDouble *c_jy,
CellDouble *c_jz,
Cell *c,
int index,
int blockDimX,
int nt
)
{
    DoubleCurrentTensor dt;
    int pqr2;
    while(index < c->number_of_particles)
    {
        // pqr2 indicates whether one or two tensor components were produced.
        c->AccumulateCurrentSingleParticle (index,&pqr2,&dt);
        int slot = index % CellDouble_array_dim;
        writeCurrentComponent(&(c_jx[slot]),&(dt.t1.Jx),&(dt.t2.Jx),pqr2);
        writeCurrentComponent(&(c_jy[slot]),&(dt.t1.Jy),&(dt.t2.Jy),pqr2);
        writeCurrentComponent(&(c_jz[slot]),&(dt.t1.Jz),&(dt.t2.Jz),pqr2);
        index += blockDimX;
    }
    // Barrier so the partial buffers are complete before the reduction step.
    __syncthreads();
}
// Write the shared-memory current matrices back to the cell's global
// Jx/Jy/Jz and release the cell's busy flag.
// Fixed: the loop now uses the `blockId`/`blockDimX` parameters instead of
// silently ignoring them in favour of the blockIdx/blockDim builtins
// (equivalent at current call sites, but honours the stated interface).
__device__ void copyFromSharedMemoryToCell(
CellDouble *c_jx,
CellDouble *c_jy,
CellDouble *c_jz,
Cell *c,
int index,
int blockDimX,
dim3 blockId
)
{
    while(index < CellExtent*CellExtent*CellExtent)
    {
        copyCellDouble(c->Jx,c_jx,index,blockId);
        copyCellDouble(c->Jy,c_jy,index,blockId);
        copyCellDouble(c->Jz,c_jz,index,blockId);
        index += blockDimX;
    }
    // NOTE(review): every thread stores 0 to the same flag without a
    // barrier; benign only because all writes carry the same value - confirm.
    c->busyParticleArray = 0;
}
// Kernel: one block per grid cell. Stages the cell's nine field matrices
// into shared memory, moves every particle of the cell, then writes the J
// matrices back and clears the cell's busy flag.
// Fixed: removed the unused local `Particle p` and dead commented-out
// parameters.
__global__ void GPU_StepAllCells(GPUCell **cells)
{
    Cell *c0 = cells[0];
    // Nine CellDouble slots: E (0-2), H (3-5), J (6-8).
    __shared__ CellDouble fd[9];
    CellDouble *c_jx,*c_jy,*c_jz,*c_ex,*c_ey,*c_ez,*c_hx,*c_hy,*c_hz;
    Cell *c = cells[ c0->getGlobalCellNumber(blockIdx.x,blockIdx.y,blockIdx.z)];
    assignSharedWithLocal(&c_jx,&c_jy,&c_jz,&c_ex,&c_ey,&c_ez,&c_hx,&c_hy,&c_hz,fd);
    copyFieldsToSharedMemory(c_jx,c_jy,c_jz,c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,c,
    threadIdx.x,blockIdx,blockDim.x);
    MoveParticlesInCell(c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,
    c,threadIdx.x,blockDim.x);
    copyFromSharedMemoryToCell(c_jx,c_jy,c_jz,c,threadIdx.x,blockDim.x,blockIdx);
}
// Kernel: one block per grid cell. Pipeline per block:
//   1. stage the cell's E/H/J fields into shared memory (fd),
//   2. zero CURRENT_SUM_BUFFER_LENGTH shared partial-current buffers,
//   3. accumulate each particle's current into the partial buffers,
//   4. reduce the partials into the main shared J matrices,
//   5. write J back to the cell in global memory.
// `nt` is the time step index (used for diagnostics downstream).
__global__ void GPU_CurrentsAllCells(GPUCell **cells,int nt
// int i,
// double *global_jx,
// double mass,
// double q_mass
// CellDouble *
)
{
Cell *c,*c0 = cells[0];
__shared__ CellDouble fd[9];
CellDouble *c_jx,*c_jy,*c_jz,*c_ex,*c_ey,*c_ez,*c_hx,*c_hy,*c_hz;
// CurrentTensor t1,t2;
// int pqr2;
// Particle p;
// Partial-sum buffers: particles are scattered round-robin across these to
// reduce atomic contention, then reduced into c_jx/c_jy/c_jz below.
__shared__ CellDouble m_c_jx[CURRENT_SUM_BUFFER_LENGTH];
__shared__ CellDouble m_c_jy[CURRENT_SUM_BUFFER_LENGTH];
__shared__ CellDouble m_c_jz[CURRENT_SUM_BUFFER_LENGTH];
c = cells[ c0->getGlobalCellNumber(blockIdx.x,blockIdx.y,blockIdx.z)];
assignSharedWithLocal(&c_jx,&c_jy,&c_jz,&c_ex,&c_ey,&c_ez,&c_hx,&c_hy,&c_hz,fd);
copyFieldsToSharedMemory(c_jx,c_jy,c_jz,c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,c,
threadIdx.x,blockIdx,blockDim.x);
set_cell_double_arrays_to_zero(m_c_jx,m_c_jy,m_c_jz,CURRENT_SUM_BUFFER_LENGTH,
threadIdx.x,blockDim.x);
AccumulateCurrentWithParticlesInCell(m_c_jx,CURRENT_SUM_BUFFER_LENGTH,m_c_jy,m_c_jz,
c,threadIdx.x,blockDim.x,nt);
// Reduce all partial buffers into the main shared J matrices.
addCellDouble(c_jx,&(m_c_jx[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
addCellDouble(c_jy,&(m_c_jy[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
addCellDouble(c_jz,&(m_c_jz[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
// addCellDouble(c_jx,&(m_c_jx[1]),threadIdx.x,blockIdx,3);
// addCellDouble(c_jx,&(m_c_jx[2]),threadIdx.x,blockIdx,3);
copyFromSharedMemoryToCell(c_jx,c_jy,c_jz,c,threadIdx.x,blockDim.x,blockIdx);
// if((c->i == 28) && (c->l == 3) && (c->k == 2) && (nt == 1))
// {
// for(int i = 0;i < CellExtent;i++)
// {
// for(int l = 0;l < CellExtent;l++)
// {
// for(int k = 0;k < CellExtent;k++)
// {
// if(fabs(c_jx->M[i][l][k]) > 1e-15)
// {
// printf("c_jx %5d %3d %3d %25.15e thread %5d %3d %3d block %5d %3d %3d nt %3d\n",
// i,l,k,
// c_jx->M[i][l][k],
// threadIdx.x,threadIdx.y, threadIdx.z,
// blockIdx.x,blockIdx.y,blockIdx.z,nt);
// }
// }
// }
// }
// }
}
// Debug kernel: copies cell `n` by value; the actual current-comparison
// logic (CheckArraySize against jx/jy/jz) is commented out, so the kernel is
// currently a no-op apart from the copy. Kept for its diagnostic scaffolding.
__global__ void GPU_ControlAllCellsCurrents(Cell **cells,int n,int i,CellDouble *jx,CellDouble *jy,CellDouble *jz)
{
// unsigned int nx = blockIdx.x;
// unsigned int ny = blockIdx.y;
// unsigned int nz = blockIdx.z;
// int i,l,k;
Cell *c,*c0 = cells[0],nc;
//double t;
// __shared__ extern CellDouble fd[9];
//double *src;
//int pqr2;
// CurrentTensor t1,t2;
// c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
c = cells[ n ];
nc = *c;
// double cjx,cjy,cjz;
// cjx = CheckArraySize((double *)jx,(double *)(nc.Jx),sizeof(CellDouble)/sizeof(double));
// cjy = CheckArraySize((double *)jy,(double *)(nc.Jy),sizeof(CellDouble)/sizeof(double));
// cjz = CheckArraySize((double *)jz,(double *)(nc.Jz),sizeof(CellDouble)/sizeof(double));
#ifdef GPU_CONTROL_ALL_CELLS_CURRENTS_PRINT
// printf("cell (%d,%d,%d) particle %d currents %.5f %.5f %.5f \n",nc.i,nc.l,nc.k,i,cjx,cjy,cjz);
#endif
}
// Second magnetic-field half-step at node (i,l,k): H += Q, where Q was
// computed by the first half-step (emh1_Element).
__host__ __device__
void emh2_Element(
Cell *c,
int i,int l,int k,
double *Q,double *H)
{
    const int n = c->getGlobalCellNumber(i,l,k);
    H[n] += Q[n];
}
// Kernel wrapper for emh2_Element: one block per node, offset by the start
// indices (i_s, l_s, k_s).
__global__
void GPU_emh2(
GPUCell **cells,
int i_s,int l_s,int k_s,
double *Q,double *H
)
{
    Cell *root = cells[0];
    emh2_Element(root,
                 i_s + blockIdx.x,
                 l_s + blockIdx.y,
                 k_s + blockIdx.z,
                 Q,H);
}
// First magnetic-field half-step at node i: form the half-curl
//   Q = 0.5 * (c1*(E1[n+d1] - E1[n]) - c2*(E2[n+d2] - E2[n]))
// and accumulate it into H. d1/d2 are the finite-difference offsets for the
// two electric-field components.
__host__ __device__
void emh1_Element(
Cell *c,
int3 i,
double *Q,double *H,double *E1, double *E2,
double c1,double c2,
int3 d1,int3 d2)
{
    const int n  = c->getGlobalCellNumber(i.x,       i.y,       i.z);
    const int n1 = c->getGlobalCellNumber(i.x + d1.x,i.y + d1.y,i.z + d1.z);
    const int n2 = c->getGlobalCellNumber(i.x + d2.x,i.y + d2.y,i.z + d2.z);

    const double diff1 = E1[n1] - E1[n];
    const double diff2 = E2[n2] - E2[n];

    Q[n] = 0.5*(c1*diff1 - c2*diff2);
    H[n] += Q[n];
}
// Kernel wrapper for emh1_Element: one block per node; the block index
// triple addresses the node directly.
__global__
void GPU_emh1(
GPUCell **cells,
double *Q,double *H,double *E1, double *E2,
double c1,double c2,
int3 d1,int3 d2
)
{
    Cell *root = cells[0];
    int3 node = make_int3(blockIdx.x,blockIdx.y,blockIdx.z);
    emh1_Element(root,node,Q,H,E1,E2,c1,c2,d1,d2);
}
// Electric-field update at node i:
//   E += c1*(H1[n] - H1[n+d1]) - c2*(H2[n] - H2[n+d2]) - tau*J[n]
// d1/d2 are the finite-difference offsets for the two magnetic components;
// tau scales the current contribution.
__host__ __device__
void emeElement(Cell *c,int3 i,double *E,double *H1, double *H2,
double *J,double c1,double c2, double tau,
int3 d1,int3 d2
)
{
    const int n  = c->getGlobalCellNumber(i.x,       i.y,       i.z);
    const int n1 = c->getGlobalCellNumber(i.x + d1.x,i.y + d1.y,i.z + d1.z);
    const int n2 = c->getGlobalCellNumber(i.x + d2.x,i.y + d2.y,i.z + d2.z);

    const double curl1 = H1[n] - H1[n1];
    const double curl2 = H2[n] - H2[n2];
    E[n] += c1*curl1 - c2*curl2 - tau*J[n];
}
// Copy one boundary value for periodic conditions: E at layer `to` takes the
// value at layer `from` along direction `dir`, for transverse position (i,k).
__host__ __device__
void periodicElement(Cell *c,int i,int k,double *E,int dir, int to,int from)
{
    const int dst = c->getGlobalBoundaryCellNumber(i,k,dir,to);
    const int src = c->getGlobalBoundaryCellNumber(i,k,dir,from);
    E[dst] = E[src];
}
// Kernel wrapper for periodicElement: block (x,z) spans the transverse
// plane, offset by the start indices (i_s, k_s).
__global__ void GPU_periodic(GPUCell **cells,
int i_s,int k_s,
double *E,int dir, int to,int from)
{
    Cell *root = cells[0];
    periodicElement(root,
                    blockIdx.x + i_s,
                    blockIdx.z + k_s,
                    E, dir, to, from);
}
// Fold periodic boundary currents along direction `dir` for transverse
// position (i,k): layers 1 and N-1 are combined, then layers 0 and N-2.
// `dirE` is the current component's own direction; the guards below skip the
// add/copy for the component parallel to the boundary.
// NOTE(review): the statements are order-sensitive (adds must precede the
// mirroring copies) - do not reorder.
__host__ __device__
void periodicCurrentElement(Cell *c,int i,int k,double *E,int dir, int dirE,int N)
{
int n1 = c->getGlobalBoundaryCellNumber(i,k,dir,1);
int n_Nm1 = c->getGlobalBoundaryCellNumber(i,k,dir,N-1);
if(dir != dirE)
{
E[n1] += E[n_Nm1];
}
if(dir != 1 || dirE != 1)
{
E[n_Nm1] = E[n1];
}
int n_Nm2 = c->getGlobalBoundaryCellNumber(i,k,dir,N-2);
int n0 = c->getGlobalBoundaryCellNumber(i,k,dir,0);
#ifdef PERIODIC_CURRENT_PRINTS
printf("%e %e \n",E[n0],E[n_Nm2]);
#endif
E[n0] += E[n_Nm2];
E[n_Nm2] = E[n0];
}
// Kernel wrapper for periodicCurrentElement: block (x,z) spans the
// transverse plane, offset by the start indices (i_s, k_s).
__global__ void GPU_CurrentPeriodic(GPUCell **cells,double *E,int dirE, int dir,
int i_s,int k_s,int N)
{
    Cell *root = cells[0];
    periodicCurrentElement(root,
                           blockIdx.x + i_s,
                           blockIdx.z + k_s,
                           E, dir, dirE, N);
}
// Kernel wrapper for emeElement. Unlike the single-block-per-node kernels
// above, this one uses a full 3D grid*block decomposition; `s` carries the
// start offsets and is shifted by the thread's global 3D index.
__global__ void GPU_eme(
GPUCell **cells,
int3 s,
double *E,double *H1, double *H2,
double *J,double c1,double c2, double tau,
int3 d1,int3 d2
)
{
    Cell *root = cells[0];
    s.x += blockIdx.x*blockDim.x + threadIdx.x;
    s.y += blockIdx.y*blockDim.y + threadIdx.y;
    s.z += blockIdx.z*blockDim.z + threadIdx.z;
    emeElement(root,s,E,H1,H2,J,c1,c2,tau,d1,d2);
}
// Kernel: one block per cell. Attach the per-cell wrong-current diagnostic
// flag and particle-attribute pointer to the cell object.
__global__ void copy_pointers(Cell **cells,int *d_flags,double_pointer *d_pointers)
{
    const unsigned int cell_idx = blockIdx.x;
    Cell *cell = cells[cell_idx];
    cell->flag_wrong_current_cell = d_flags[cell_idx];
    cell->d_wrong_current_particle_attributes = d_pointers[cell_idx];
}
| 9c197cf8e8f40ca70d31bc88a0b475eabfaeab16.cu |
// Kernel: one block per grid cell. Zeroes the cell's current matrices.
// NOTE(review): the cell is copied by value into `nc` and the method is
// invoked on the copy - presumably SetAllCurrentsToZero writes through
// pointers (Jx/Jy/Jz) held inside the cell, so the copy is harmless; confirm.
__global__
void GPU_SetAllCurrentsToZero(GPUCell **cells)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
//int i,l,k;
Cell *c,*c0 = cells[0],nc;
//double t;
// __shared__ extern CellDouble fd[9];
//double *src;//,*dst;
c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
nc = *c;
nc.SetAllCurrentsToZero(threadIdx);
}
// Kernel: one block per grid cell. Each cell loads its E and H values from
// the flat global field arrays; threadIdx selects the element each thread
// handles inside readFieldsFromArrays.
__global__
void GPU_SetFieldsToCells(GPUCell **cells,
double *Ex,double *Ey,double *Ez,
double *Hx,double *Hy,double *Hz
)
{
    Cell *root = cells[0];
    const int n = root->getGlobalCellNumber(blockIdx.x,blockIdx.y,blockIdx.z);
    Cell *cell = cells[n];
    cell->readFieldsFromArrays(Ex,Ey,Ez,Hx,Hy,Hz,threadIdx);
}
// Compare two arrays element-wise and return the fraction of entries that
// agree within SIZE_TOLERANCE (1.0 = all match, 0.0 = none match).
// Fixed: `wrong` was only incremented inside the #ifdef'd debug printf, so
// with CHECK_ARRAY_SIZE_DEBUG_PRINTS disabled the function always returned
// 1.0; the counter is now maintained unconditionally. Also guards against
// division by zero for empty arrays.
__host__ __device__
double CheckArraySize(double* a, double* dbg_a,int size)
{
    if(size <= 0) return 1.0;  // nothing to compare: treat as full agreement

    int wrong = 0;
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
    printf("begin array checking1=============================\n");
#endif
    for(int n = 0;n < size;n++)
    {
        if(fabs(a[n] - dbg_a[n]) > SIZE_TOLERANCE)
        {
            wrong++;
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
            printf("n %5d %15.5e dbg %15.5e diff %15.5e wrong %10d \n",
                   n,a[n],dbg_a[n],fabs(a[n] - dbg_a[n]),wrong);
#endif
        }
    }
#ifdef CHECK_ARRAY_SIZE_DEBUG_PRINTS
    printf(" end array checking=============================\n");
#endif
    return (1.0-((double)wrong/(size)));
}
// Kernel: one block per grid cell, one thread per (i1,l1,k1) element of the
// cell's 3D current matrices. Atomically accumulates the cell-local Jx/Jy/Jz
// values into the global flat arrays at the Fortran-style node index.
// NOTE(review): `n0` and `rho` are accepted but unused; `i3` is computed and
// discarded; and a negative node index is silently negated (n = -n) -
// confirm that negation is the intended wrap handling.
__global__ void GPU_WriteAllCurrents(GPUCell **cells,int n0,
double *jx,double *jy,double *jz,double *rho)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
Cell *c,*c0 = cells[0];
// __shared__ extern CellDouble fd[9];
c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
int i1,l1,k1;
i1 = threadIdx.x;
l1 = threadIdx.y;
k1 = threadIdx.z;
// Fortran node index for this element (cell origin offset by -1).
int n = c->getFortranCellNumber(c->i+i1-1,c->l+l1-1,c->k+k1-1);
if (n < 0 ) n = -n;
double t,t_x,t_y;
t_x = c->Jx->M[i1][l1][k1];
int3 i3 = c->getCellTripletNumber(n);
cuda_atomicAdd(&(jx[n]),t_x);
t_y= c->Jy->M[i1][l1][k1];
cuda_atomicAdd(&(jy[n]),t_y);
t = c->Jz->M[i1][l1][k1];
cuda_atomicAdd(&(jz[n]),t);
}
// Kernel: one block per grid cell. Copies the cell by value and runs the
// particle control-system setup on the copy.
// NOTE(review): as with GPU_SetAllCurrentsToZero, the method is called on a
// by-value copy - presumably it writes through pointers owned by the cell;
// confirm.
__global__ void GPU_WriteControlSystem(Cell **cells)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
// int i,l,k;
Cell *c,*c0 = cells[0],nc;
//double t;
// __shared__ extern CellDouble fd[9];
//double *src; //,*dst;
// int pqr2;
//CurrentTensor t1,t2;
c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
// c = cells[ n ];
nc = *c;
nc.SetControlSystemToParticles();
}
//TODO : 1. 3 separate kernels :
// A. form 3x3x3 array with number how many to fly and departure list with start positions in 3x3x3 array
// B. func to get 3x3x3 indexes from a pair of cell numbers, to and from function
// C. 2nd kernel to write arrival 3x3x3 matrices
/// D. 3rd kernel to form arrival positions in the particle list
// E. 4th to write arriving particles
// Kernel: one block per grid cell, single-threaded per cell. Scans the
// cell's particle list and sorts particles that left the cell into the 3x3x3
// departure buckets (one per neighbour direction). On bucket overflow it
// reports TOO_MANY_PARTICLES through d_stage and aborts.
// NOTE(review): d_stage[1..3] are assigned the cell indices and then
// immediately overwritten with (ix,iy,iz) - the first triple is lost;
// presumably indices 4..6 were intended. Also, writeParticleToSurface is
// called both before and inside the departure branch - the second write
// looks redundant; confirm.
__global__ void GPU_MakeDepartureLists(GPUCell **cells,int nt,int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz;//,n;
Particle p;
Cell *c,*c0 = cells[0],nc;//,*new_c;
c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
c->departureListLength = 0;
// Reset all 27 departure bucket counters.
for(ix = 0;ix < 3;ix++)
{
for(iy = 0;iy < 3;iy++)
{
for(iz = 0;iz < 3;iz++)
{
c->departure[ix][iy][iz] = 0;
// c->departureIndex[ix][iy][iz] = 0;
}
}
}
c->departureListLength = 0;
for(int num = 0;num < c->number_of_particles; num++)
{
p = c->readParticleFromSurfaceDevice(num);
// Classify the particle's flight direction (also updates p.direction).
c->flyDirection(&p,&ix,&iy,&iz);
c->writeParticleToSurface(num,&p);
if(!c->isPointInCell(p.GetX())) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
{
// c->flyDirection(&p,&ix,&iy,&iz);
// printf("fly %d:(%d,%d,%d) %d \n",p.direction,ix,iy,iz,ix*9 +iy*3 +iz);
c->writeParticleToSurface(num,&p);
// if(p.direction == 0) printf("Blin-hren'\n");
// f(p.direction != (ix | (iy << 2) |(iz << 4))) printf("Blin-hren'^2\n");
if(c->departureListLength == PARTICLES_FLYING_ONE_DIRECTION)
{
d_stage[0] = TOO_MANY_PARTICLES;
d_stage[1] = c->i;
d_stage[2] = c->l;
d_stage[3] = c->k;
d_stage[1] = ix;
d_stage[2] = iy;
d_stage[3] = iz;
return;
}
c->departureListLength++;
int num1 = c->departure[ix][iy][iz];
c->departureList[ix][iy][iz][num1] = p;
c->departure[ix][iy][iz] += 1;
}
else
{
// printf("home %d:(%d,%d,%d) %d \n",p.direction,ix,iy,iz,ix*9 +iy*3 +iz);
}
}
}
// Kernel: one block per grid cell. Removes from the particle list every
// particle whose direction is not 13 (13 = the "stay in this cell" slot of
// the 3x3x3 neighbourhood). `num--` re-examines the slot because removal
// swaps another particle into it. `nt` and `d_stage` are unused here.
__global__ void GPU_MakeDepartureLists_rm(GPUCell **cells,int nt,int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz;//,n;
Particle p;
Cell *c,*c0 = cells[0],nc;//,*new_c;
c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
for(int num = 0;num < c->number_of_particles; num++)
{
p = c->readParticleFromSurfaceDevice(num);
if(p.direction != 13) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
{
c->removeParticleFromSurfaceDevice(num,&p,&(c->number_of_particles));
num--;
}
}
}
// Kernel: one block per grid cell. Removes every particle whose position has
// left the cell's bounds. `nt` and `d_stage` are unused here.
// NOTE(review): unlike GPU_MakeDepartureLists_rm, the compensating `num--`
// after removal is commented out below, so the particle swapped into slot
// `num` is never re-checked - confirm whether that skip is intentional.
__global__ void GPU_RemoveDepartureParticles(GPUCell **cells,int nt,int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz;//,n;
Particle p;
Cell *c,*c0 = cells[0],nc;//,*new_c;
c = cells[c0->getGlobalCellNumber(nx,ny,nz)];
for(int num = 0;num < c->number_of_particles; num++)
{
p = c->readParticleFromSurfaceDevice(num);
if(!c->isPointInCell(p.GetX())) //check Paricle = operator !!!!!!!!!!!!!!!!!!!!!!!!!!!
{
c->removeParticleFromSurfaceDevice(num,&p,&(c->number_of_particles));
// c->flyDirection(&p,&ix,&iy,&iz);
//
// //if(p.direction == 0) printf("Blin-hren'\n");
// //f(p.direction != (ix | (iy << 2) |(iz << 4))) printf("Blin-hren'^2\n");
// if(c->departureListLength == PARTICLES_FLYING_ONE_DIRECTION)
// {
// d_stage[0] = TOO_MANY_PARTICLES;
// d_stage[1] = c->i;
// d_stage[2] = c->l;
// d_stage[3] = c->k;
// d_stage[1] = ix;
// d_stage[2] = iy;
// d_stage[3] = iz;
// return;
// }
// c->departureListLength++;
// int num1 = c->departure[ix][iy][iz];
//
// c->departureList[ix][iy][iz][num1] = p;
//
// c->departure[ix][iy][iz] += 1;
//
// num--;
}
}
}
// Fuzzy equality of two doubles: true when |a - b| is below the project
// TOLERANCE constant.
__device__ int d_comd(double a,double b)
{
    const double diff = fabs(a - b);
    return diff < TOLERANCE;
}
// Component-wise fuzzy comparison of two particles: positions (x,y,z) and
// momenta (pu,pv,pw) must all agree within TOLERANCE (via d_comd).
// Fixed: removed the unused local `dpx`.
__device__ int d_compare(Particle p,Particle p1)
{
    int tx  = d_comd(p.x,p1.x);
    int ty  = d_comd(p.y,p1.y);
    int tz  = d_comd(p.z,p1.z);
    int tpx = d_comd(p.pu,p1.pu);
    int tpy = d_comd(p.pv,p1.pv);
    int tpz = d_comd(p.pw,p1.pw);
    return (tx && ty && tz && tpx && tpy && tpz);
}
// Kernel: one block per grid cell. Collects arriving particles: scans all 27
// wrapped neighbour cells and inserts into this cell every neighbour
// particle whose direction is not 13 ("leaving"), resetting its direction to
// 13 ("settled"). The commented-out path is an older departure-bucket-based
// transfer scheme.
// NOTE(review): particles are inserted regardless of which neighbour they
// were actually heading to - presumably a later position check handles
// misdirected inserts; confirm.
__global__ void GPU_ArrangeFlights(GPUCell **cells,int nt, int *d_stage)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
int ix,iy,iz,snd_ix,snd_iy,snd_iz,num,n;
Particle p,p1;
Cell *c,*c0 = cells[0],nc,*snd_c;
c = cells[ n = c0->getGlobalCellNumber(nx,ny,nz)];
for(ix = 0;ix < 3;ix++)
for(iy = 0;iy < 3;iy++)
for(iz = 0;iz < 3;iz++)
{
// Periodic-wrapped index of the neighbour at offset (ix-1,iy-1,iz-1).
n = c0->getWrapCellNumber(nx+ix-1,ny+iy-1,nz+iz-1);
snd_c = cells[ n ];
for(int j = 0;j < snd_c->number_of_particles;j++)
{
p1 = snd_c->readParticleFromSurfaceDevice(j);
// snd_ix = ix;
// snd_iy = iy;
// snd_iz = iz;
//
// c->inverseDirection(&snd_ix,&snd_iy,&snd_iz);
//
// num = snd_c->departure[snd_ix][snd_iy][snd_iz];
//
//
//
// for(int i = 0;i < num;i++)
// {
// p = snd_c->departureList[snd_ix][snd_iy][snd_iz][i];
//
//
//
if(p1.direction != 13)
{
p1.direction = 13;
c->Insert(p1);
}
//
// }
}
}
}
// Kernel: one block per grid cell. Moves "stray" particles - those whose
// position is no longer inside their owning cell - into the correct cell.
// Per-cell spinlocks (busyParticleArray, taken via atomicCAS and released
// via atomicExch) guard both the removal from the source cell and the
// insertion into the destination cell. `num--`-style compensation is done
// with `i--` after removal, since removal swaps a new particle into slot i.
// NOTE(review): source and destination locks are taken separately, not
// together, so two cells exchanging particles concurrently interleave their
// list updates - confirm Insert/remove are safe under that interleaving.
__global__ void GPU_CollectStrayParticles(Cell **cells,int nt
// int n,
// int i,
// double mass,
// double q_mass,
// double *p_control,
// int jmp
)
{
unsigned int nx = blockIdx.x;
unsigned int ny = blockIdx.y;
unsigned int nz = blockIdx.z;
// int busy;
Particle p;
int n;
// int i,l,k;
Cell *c,*c0 = cells[0],nc,*new_c;
//int first = 1;
c = cells[ n = c0->getGlobalCellNumber(nx,ny,nz)];
for(int i = 0;i < c->number_of_particles; i++)
{
p = c->readParticleFromSurfaceDevice(i);
#ifdef STRAY_DEBUG_PRINTS
//if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
// {
printf("STRAY-BASIC step %d cell %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %15.5e < %15.5e Y: %15.5e < %15.5e < %15.5e Z: %15.5e < %15.5e < %15.5e \n",
nt,c->i,c->l,c->k,(int)p.sort,i,p.fortran_number,
c->x0,p.x,c->x0+c->hx,
c->y0,p.y,c->y0+c->hy,
c->z0,p.z,c->z0+c->hz
);
//}
#endif
if(!c->isPointInCell(p.GetX()))// || (p.fortran_number == 753) )//|| (p.fortran_number == 10572))
{
#ifdef STRAY_DEBUG_PRINTS
printf("STRAY-OUT step %3d cell %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %25.17e < %15.5e \n",
nt,c->i,c->l,c->k,(int)p.sort,i,p.fortran_number,c->x0,p.x,c->x0+c->hx);
#endif
// Destination cell for the particle's current position.
int new_n = c->getPointCell(p.GetX());
new_c = cells[new_n];
if(c->i == 99 && c->l == 0 && c->k == 3)
{
// c->printCellParticles();
// if(c->i >= c->Nx-1)
// {
#ifdef STRAY_DEBUG_PRINTS
// if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
// {
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d X: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->x0,p.x,c->x0+c->hx);
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d Y: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->y0,p.y,c->y0+c->hy);
// printf("c %3d (%d,%d,%d)->(%d,%d,%d) s %d p %d FN %5d Z: %15.5e<%23.16e<%15.5e \n",n,c->i,c->l,c->k,new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,c->z0,p.z,c->z0+c->hz);
// }
#endif
// }
}
// if(first == 1)
// {
// do{
// busy = atomicCAS(&(c->busyParticleArray),0,1);
// busy = ((c->busyParticleArra == 0 )? 1: c->busyParticleArra)
// busy = ((c->busyParticleArra == 1 )? 0: c->busyParticleArra)
// }while(busy == 1);
// Spin until the source cell's particle list is free, then remove.
while (atomicCAS(&(c->busyParticleArray),0,1)) {}
c->removeParticleFromSurfaceDevice(i,&p,&(c->number_of_particles));
//c->busyParticleArray = 0;
atomicExch(&(c->busyParticleArray),0u);
i--;
// first = 0;
// }
// if(c->i == 99 && c->l == 0 && c->k == 3)
// {
// c->printCellParticles();
// }
//do{
// busy = atomicCAS(&(new_c->busyParticleArray),0,1);
//}while(busy == 1);
// Spin until the destination cell's particle list is free, then insert.
while (atomicCAS(&(new_c->busyParticleArray),0,1)) {}
new_c->Insert(p);
#ifdef STRAY_DEBUG_PRINTS
printf("STRAY-INSERT step %d %3d %d %d sort %d particle %d FORTRAN %5d X: %15.5e < %25.17e < %15.5e \n",
nt,
new_c->i,new_c->l,new_c->k,(int)p.sort,i,p.fortran_number,new_c->x0,p.x,new_c->x0+new_c->hx);
#endif
//new_c->busyParticleArray = 0;
atomicExch(&(new_c->busyParticleArray),0u);
if((p.fortran_number == 2498) && (p.sort == BEAM_ELECTRON))
{
// new_c->printCellParticles();
}
if(c->i == 99 && c->l == 0 && c->k == 3)
{
// new_c->printCellParticles();
}
}
}
// c->printCellParticles("STRAY-FINAL",nt);
}
// Return the non-negligible contribution a tensor component makes to node
// (i,l,k), defaulting to `prev` when none of its four entries match.
// Later entries override earlier ones, matching the original if-chain order.
__device__ static double pickComponentImpact(CurrentTensorComponent *t,
int i,int l,int k,double prev)
{
    double res = prev;
    if(t->i11 == i && t->i12 == l && t->i13 == k && (fabs(t->t[0]) > 1e-15)) res = t->t[0];
    if(t->i21 == i && t->i22 == l && t->i23 == k && (fabs(t->t[1]) > 1e-15)) res = t->t[1];
    if(t->i31 == i && t->i32 == l && t->i33 == k && (fabs(t->t[2]) > 1e-15)) res = t->t[2];
    if(t->i41 == i && t->i42 == l && t->i43 == k && (fabs(t->t[3]) > 1e-15)) res = t->t[3];
    return res;
}

// Diagnostic: the (last matching) contribution that t1 - and, when
// pqr2 == 2, t2 - makes to mesh node (i,l,k); 0 when neither touches it.
__device__ double checkCurrentComponentImpact(
CurrentTensorComponent *t1,CurrentTensorComponent *t2,
int i,int l, int k,int pqr2
)
{
    double res = pickComponentImpact(t1,i,l,k,0.0);
    if(pqr2 == 2)
    {
        res = pickComponentImpact(t2,i,l,k,res);
    }
    return res;
}
// Atomically scatter the four weighted entries of one tensor component into
// the current matrix J.
__device__ static void scatterTensorComponent(CellDouble *J,CurrentTensorComponent *t)
{
    cuda_atomicAdd(&(J->M[t->i11][t->i12][t->i13]),t->t[0]);
    cuda_atomicAdd(&(J->M[t->i21][t->i22][t->i23]),t->t[1]);
    cuda_atomicAdd(&(J->M[t->i31][t->i32][t->i33]),t->t[2]);
    cuda_atomicAdd(&(J->M[t->i41][t->i42][t->i43]),t->t[3]);
}

// Deposit a particle's current into J: always the first tensor component,
// plus the second one when pqr2 == 2 (particle crossed a cell face).
__device__ void writeCurrentComponent(CellDouble *J,
CurrentTensorComponent *t1,CurrentTensorComponent *t2,int pqr2)
{
    scatterTensorComponent(J,t1);
    if(pqr2 == 2)
    {
        scatterTensorComponent(J,t2);
    }
}
// Copy element n of src's flattened CellExtent^3 matrix into dst.
// Thread indices past the end are ignored; `block` is kept only for debug
// printout call sites.
__device__ void copyCellDouble(CellDouble *dst,CellDouble *src,unsigned int n,uint3 block)
{
    if(n >= CellExtent*CellExtent*CellExtent) return;

    double *to = (double *)(dst->M);
    double *from = (double *)(src->M);
    to[n] = from[n];
}
// Accumulate element n of each of the `dim` matrices starting at `src` into
// dst (used to reduce the per-block partial-current buffers). `block` is
// kept only for debug printout call sites.
// Fixed: removed a dead initial store to d_src that was unconditionally
// overwritten inside the loop.
__device__ void addCellDouble(CellDouble *dst,CellDouble *src,unsigned int n,uint3 block,int dim)
{
    if(n < CellExtent*CellExtent*CellExtent)
    {
        double *d_dst = (double *)(dst->M);
        for (int i = 0;i < dim;i++)
        {
            double *d_src = (double *)(src[i].M);
            d_dst[n] += d_src[n];
        }
    }
}
// Zero element n of dst's flattened CellExtent^3 matrix; indices past the
// end are ignored so any block size may call this with its flat thread id.
__device__ void setCellDoubleToZero(CellDouble *dst,unsigned int n)
{
    if(n >= CellExtent*CellExtent*CellExtent) return;

    double *flat = (double *)(dst->M);
    flat[n] = 0.0;
}
// Kernel: one block per cell. Write each cell's particle count into
// `numbers`, indexed by block id.
__global__ void GPU_GetCellNumbers(Cell **cells,
int *numbers)
{
    Cell *current = cells[blockIdx.x];
    numbers[blockIdx.x] = current->number_of_particles;
}
// Bind the nine shared-memory CellDouble slots of `fd` to named field
// pointers. fd layout: slots 0-2 = E, 3-5 = H, 6-8 = J.
__device__ void assignSharedWithLocal(
CellDouble **c_jx,
CellDouble **c_jy,
CellDouble **c_jz,
CellDouble **c_ex,
CellDouble **c_ey,
CellDouble **c_ez,
CellDouble **c_hx,
CellDouble **c_hy,
CellDouble **c_hz,
CellDouble *fd)
{
    CellDouble **slots[9] = { c_ex, c_ey, c_ez, c_hx, c_hy, c_hz, c_jx, c_jy, c_jz };
    for(int s = 0;s < 9;s++)
    {
        *(slots[s]) = &(fd[s]);
    }
}
// Stage the cell's nine field matrices (E, H, J) from global memory into the
// shared-memory copies. `index` is the calling thread's flat id; the loop
// strides by blockDimX so any block size covers all CellExtent^3 elements.
// The trailing barrier guarantees the shared copies are complete before use.
__device__ void copyFieldsToSharedMemory(
CellDouble *c_jx,
CellDouble *c_jy,
CellDouble *c_jz,
CellDouble *c_ex,
CellDouble *c_ey,
CellDouble *c_ez,
CellDouble *c_hx,
CellDouble *c_hy,
CellDouble *c_hz,
Cell *c,
int index,
dim3 blockId,
int blockDimX
)
{
//int index = threadIdx.x;
while(index < CellExtent*CellExtent*CellExtent)
{
// if(index < 125) {
copyCellDouble(c_ex,c->Ex,index,blockId);
copyCellDouble(c_ey,c->Ey,index,blockId);
copyCellDouble(c_ez,c->Ez,index,blockId);
copyCellDouble(c_hx,c->Hx,index,blockId);
copyCellDouble(c_hy,c->Hy,index,blockId);
copyCellDouble(c_hz,c->Hz,index,blockId);
copyCellDouble(c_jx,c->Jx,index,blockId);
copyCellDouble(c_jy,c->Jy,index,blockId);
copyCellDouble(c_jz,c->Jz,index,blockId);
//}
index += blockDimX;
}
// Barrier is outside any divergent branch: all threads exit the while loop.
__syncthreads();
}
// Zero the per-block partial-current buffers (Jx/Jy/Jz). `index` is the
// calling thread's flat id and selects which element of each CellDouble this
// thread clears (setCellDoubleToZero bounds-checks it).
// NOTE(review): full coverage of all CellExtent^3 elements requires
// blockDimX >= CellExtent^3 - confirm launch configs satisfy this.
// Fixed: removed a second while-loop whose body was entirely commented out;
// it performed no work.
__device__ void set_cell_double_arrays_to_zero(
CellDouble *m_c_jx,
CellDouble *m_c_jy,
CellDouble *m_c_jz,
int size,
int index,
int blockDimX
)
{
    for(int i = 0;i < size;i++)
    {
        setCellDoubleToZero(&(m_c_jx[i]),index);
        setCellDoubleToZero(&(m_c_jy[i]),index);
        setCellDoubleToZero(&(m_c_jz[i]),index);
    }
    // All threads must finish zeroing before the buffers are used.
    __syncthreads();
}
// Zero the first tensor slot (t[0]) of each element of `ca`.
// Fixed: the loop ran a hard-coded `i <= 100` (101 iterations) regardless of
// the array's real size, overrunning shorter arrays; it now honours `length`.
// NOTE(review): only t[0] is cleared, as in the original - confirm t[1..3]
// are always written before being read.
__device__ void set_cell_double_array_to_zero(CurrentTensorComponent *ca,int length)
{
    for(int i = 0; i < length; i++)
    {
        ca[i].t[0] = 0.0;
    }
}
// Advance every particle of cell `c` by one step. Thread `index` handles
// particles index, index+blockDimX, index+2*blockDimX, ...
// NOTE(review): the shared-memory field copies (c_ex..c_hz) are accepted but
// the mover builds `cf` from the cell's own global-memory field pointers -
// confirm whether the shared copies were meant to be used here.
// Fixed: removed the unused local `pqr2` and dead commented-out parameters.
__device__ void MoveParticlesInCell(
CellDouble *c_ex,
CellDouble *c_ey,
CellDouble *c_ez,
CellDouble *c_hx,
CellDouble *c_hy,
CellDouble *c_hz,
Cell *c,
int index,
int blockDimX
)
{
    CellTotalField cf;
    while(index < c->number_of_particles)
    {
        cf.Ex = c->Ex;
        cf.Ey = c->Ey;
        cf.Ez = c->Ez;
        cf.Hx = c->Hx;
        cf.Hy = c->Hy;
        cf.Hz = c->Hz;
        c->MoveSingleParticle(index,cf);
        index += blockDimX;
    }
    // Barrier after all threads finish their particle subsets.
    __syncthreads();
}
// Copy matrix b into matrix a (a = b). The M member is a contiguous
// CellExtent^3 array of doubles, so a single flat loop suffices.
__device__ void assign_cell_double(CellDouble *a,CellDouble *b)
{
    double *target = (double *)(a->M);
    double *source = (double *)(b->M);
    for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
    {
        target[n] = source[n];
    }
}
// Accumulate matrix b into matrix a (a += b). The M member is a contiguous
// CellExtent^3 array of doubles, so a single flat loop suffices.
__device__ void add_cell_double(CellDouble *a,CellDouble *b)
{
    double *target = (double *)(a->M);
    double *source = (double *)(b->M);
    for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
    {
        target[n] += source[n];
    }
}
// Zero every element of each of the `size` CellDouble matrices in `arr`.
__device__ void set_cell_double_array_to_zero(CellDouble *arr,int size)
{
    for(int num = 0;num < size;num++)
    {
        // Each M is a contiguous CellExtent^3 block of doubles.
        double *flat = (double *)(arr[num].M);
        for(int n = 0;n < CellExtent*CellExtent*CellExtent;n++)
        {
            flat[n] = 0.0;
        }
    }
}
// Accumulate the current deposited by every particle of cell `c` into the
// shared partial buffers c_jx/c_jy/c_jz. The slot `index %
// CellDouble_array_dim` spreads the atomic adds over several buffers to
// reduce contention. `nt` (time step) is kept for interface compatibility.
// Fixed: removed unused locals (t1, t2, dt1) and a stray double semicolon.
__device__ void AccumulateCurrentWithParticlesInCell(
CellDouble *c_jx,
int CellDouble_array_dim,
CellDouble *c_jy,
CellDouble *c_jz,
Cell *c,
int index,
int blockDimX,
int nt
)
{
    DoubleCurrentTensor dt;
    int pqr2;
    while(index < c->number_of_particles)
    {
        // pqr2 indicates whether one or two tensor components were produced.
        c->AccumulateCurrentSingleParticle (index,&pqr2,&dt);
        int slot = index % CellDouble_array_dim;
        writeCurrentComponent(&(c_jx[slot]),&(dt.t1.Jx),&(dt.t2.Jx),pqr2);
        writeCurrentComponent(&(c_jy[slot]),&(dt.t1.Jy),&(dt.t2.Jy),pqr2);
        writeCurrentComponent(&(c_jz[slot]),&(dt.t1.Jz),&(dt.t2.Jz),pqr2);
        index += blockDimX;
    }
    // Barrier so the partial buffers are complete before the reduction step.
    __syncthreads();
}
// Write the shared-memory current matrices back to the cell's global
// Jx/Jy/Jz and release the cell's busy flag.
// Fixed: the loop now uses the `blockId`/`blockDimX` parameters instead of
// silently ignoring them in favour of the blockIdx/blockDim builtins
// (equivalent at current call sites, but honours the stated interface).
__device__ void copyFromSharedMemoryToCell(
CellDouble *c_jx,
CellDouble *c_jy,
CellDouble *c_jz,
Cell *c,
int index,
int blockDimX,
dim3 blockId
)
{
    while(index < CellExtent*CellExtent*CellExtent)
    {
        copyCellDouble(c->Jx,c_jx,index,blockId);
        copyCellDouble(c->Jy,c_jy,index,blockId);
        copyCellDouble(c->Jz,c_jz,index,blockId);
        index += blockDimX;
    }
    // NOTE(review): every thread stores 0 to the same flag without a
    // barrier; benign only because all writes carry the same value - confirm.
    c->busyParticleArray = 0;
}
// Kernel: one block per grid cell. Stages the cell's nine field matrices
// into shared memory, moves every particle of the cell, then writes the J
// matrices back and clears the cell's busy flag.
// Fixed: removed the unused local `Particle p` and dead commented-out
// parameters.
__global__ void GPU_StepAllCells(GPUCell **cells)
{
    Cell *c0 = cells[0];
    // Nine CellDouble slots: E (0-2), H (3-5), J (6-8).
    __shared__ CellDouble fd[9];
    CellDouble *c_jx,*c_jy,*c_jz,*c_ex,*c_ey,*c_ez,*c_hx,*c_hy,*c_hz;
    Cell *c = cells[ c0->getGlobalCellNumber(blockIdx.x,blockIdx.y,blockIdx.z)];
    assignSharedWithLocal(&c_jx,&c_jy,&c_jz,&c_ex,&c_ey,&c_ez,&c_hx,&c_hy,&c_hz,fd);
    copyFieldsToSharedMemory(c_jx,c_jy,c_jz,c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,c,
    threadIdx.x,blockIdx,blockDim.x);
    MoveParticlesInCell(c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,
    c,threadIdx.x,blockDim.x);
    copyFromSharedMemoryToCell(c_jx,c_jy,c_jz,c,threadIdx.x,blockDim.x,blockIdx);
}
// Kernel: one block per grid cell. Pipeline per block:
//   1. stage the cell's E/H/J fields into shared memory (fd),
//   2. zero CURRENT_SUM_BUFFER_LENGTH shared partial-current buffers,
//   3. accumulate each particle's current into the partial buffers,
//   4. reduce the partials into the main shared J matrices,
//   5. write J back to the cell in global memory.
// `nt` is the time step index (used for diagnostics downstream).
__global__ void GPU_CurrentsAllCells(GPUCell **cells,int nt
// int i,
// double *global_jx,
// double mass,
// double q_mass
// CellDouble *
)
{
Cell *c,*c0 = cells[0];
__shared__ CellDouble fd[9];
CellDouble *c_jx,*c_jy,*c_jz,*c_ex,*c_ey,*c_ez,*c_hx,*c_hy,*c_hz;
// CurrentTensor t1,t2;
// int pqr2;
// Particle p;
// Partial-sum buffers: particles are scattered round-robin across these to
// reduce atomic contention, then reduced into c_jx/c_jy/c_jz below.
__shared__ CellDouble m_c_jx[CURRENT_SUM_BUFFER_LENGTH];
__shared__ CellDouble m_c_jy[CURRENT_SUM_BUFFER_LENGTH];
__shared__ CellDouble m_c_jz[CURRENT_SUM_BUFFER_LENGTH];
c = cells[ c0->getGlobalCellNumber(blockIdx.x,blockIdx.y,blockIdx.z)];
assignSharedWithLocal(&c_jx,&c_jy,&c_jz,&c_ex,&c_ey,&c_ez,&c_hx,&c_hy,&c_hz,fd);
copyFieldsToSharedMemory(c_jx,c_jy,c_jz,c_ex,c_ey,c_ez,c_hx,c_hy,c_hz,c,
threadIdx.x,blockIdx,blockDim.x);
set_cell_double_arrays_to_zero(m_c_jx,m_c_jy,m_c_jz,CURRENT_SUM_BUFFER_LENGTH,
threadIdx.x,blockDim.x);
AccumulateCurrentWithParticlesInCell(m_c_jx,CURRENT_SUM_BUFFER_LENGTH,m_c_jy,m_c_jz,
c,threadIdx.x,blockDim.x,nt);
// Reduce all partial buffers into the main shared J matrices.
addCellDouble(c_jx,&(m_c_jx[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
addCellDouble(c_jy,&(m_c_jy[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
addCellDouble(c_jz,&(m_c_jz[0]),threadIdx.x,blockIdx,CURRENT_SUM_BUFFER_LENGTH);
// addCellDouble(c_jx,&(m_c_jx[1]),threadIdx.x,blockIdx,3);
// addCellDouble(c_jx,&(m_c_jx[2]),threadIdx.x,blockIdx,3);
copyFromSharedMemoryToCell(c_jx,c_jy,c_jz,c,threadIdx.x,blockDim.x,blockIdx);
// if((c->i == 28) && (c->l == 3) && (c->k == 2) && (nt == 1))
// {
// for(int i = 0;i < CellExtent;i++)
// {
// for(int l = 0;l < CellExtent;l++)
// {
// for(int k = 0;k < CellExtent;k++)
// {
// if(fabs(c_jx->M[i][l][k]) > 1e-15)
// {
// printf("c_jx %5d %3d %3d %25.15e thread %5d %3d %3d block %5d %3d %3d nt %3d\n",
// i,l,k,
// c_jx->M[i][l][k],
// threadIdx.x,threadIdx.y, threadIdx.z,
// blockIdx.x,blockIdx.y,blockIdx.z,nt);
// }
// }
// }
// }
// }
}
// Debug/validation kernel: fetches cell n and copies it by value.  The
// actual comparison of the cell's currents against the reference arrays
// (jx/jy/jz, via CheckArraySize) is commented out, so the kernel currently
// has no observable effect -- even the print under
// GPU_CONTROL_ALL_CELLS_CURRENTS_PRINT is disabled.
__global__ void GPU_ControlAllCellsCurrents(Cell **cells,int n,int i,CellDouble *jx,CellDouble *jy,CellDouble *jz)
{
// unsigned int nx = blockIdx.x;
// unsigned int ny = blockIdx.y;
// unsigned int nz = blockIdx.z;
// int i,l,k;
Cell *c,*c0 = cells[0],nc;
//double t;
// __shared__ extern CellDouble fd[9];
//double *src;
//int pqr2;
// CurrentTensor t1,t2;
// c = cells[ c0->getGlobalCellNumber(nx,ny,nz)];
c = cells[ n ];
nc = *c;
// double cjx,cjy,cjz;
// cjx = CheckArraySize((double *)jx,(double *)(nc.Jx),sizeof(CellDouble)/sizeof(double));
// cjy = CheckArraySize((double *)jy,(double *)(nc.Jy),sizeof(CellDouble)/sizeof(double));
// cjz = CheckArraySize((double *)jz,(double *)(nc.Jz),sizeof(CellDouble)/sizeof(double));
#ifdef GPU_CONTROL_ALL_CELLS_CURRENTS_PRINT
// printf("cell (%d,%d,%d) particle %d currents %.5f %.5f %.5f \n",nc.i,nc.l,nc.k,i,cjx,cjy,cjz);
#endif
}
__host__ __device__
void emh2_Element(Cell *c, int i, int l, int k, double *Q, double *H)
{
  // Add the stored half-step correction Q into the field array H at the
  // flat index of cell (i,l,k).
  const int idx = c->getGlobalCellNumber(i, l, k);
  H[idx] += Q[idx];
}
__global__
void GPU_emh2(GPUCell **cells,
              int i_s, int l_s, int k_s,
              double *Q, double *H)
{
  // One block per cell: the 3D block index offsets the (i_s,l_s,k_s) origin.
  Cell *c0 = cells[0];
  emh2_Element(c0, i_s + blockIdx.x, l_s + blockIdx.y, k_s + blockIdx.z, Q, H);
}
__host__ __device__
void emh1_Element(Cell *c, int3 i,
                  double *Q, double *H, double *E1, double *E2,
                  double c1, double c2,
                  int3 d1, int3 d2)
{
  // Half-step field update: a finite-difference "curl" is formed from E1
  // along offset d1 and E2 along offset d2, stored in Q, and added to H.
  const int n  = c->getGlobalCellNumber(i.x, i.y, i.z);
  const int n1 = c->getGlobalCellNumber(i.x + d1.x, i.y + d1.y, i.z + d1.z);
  const int n2 = c->getGlobalCellNumber(i.x + d2.x, i.y + d2.y, i.z + d2.z);
  const double dE1 = E1[n1] - E1[n];
  const double dE2 = E2[n2] - E2[n];
  Q[n] = 0.5 * (c1 * dE1 - c2 * dE2);
  H[n] += Q[n];
}
__global__
void GPU_emh1(GPUCell **cells,
              double *Q, double *H, double *E1, double *E2,
              double c1, double c2,
              int3 d1, int3 d2)
{
  // One block per cell; the 3D block index selects the cell to update.
  emh1_Element(cells[0], make_int3(blockIdx.x, blockIdx.y, blockIdx.z),
               Q, H, E1, E2, c1, c2, d1, d2);
}
__host__ __device__
void emeElement(Cell *c, int3 i, double *E, double *H1, double *H2,
                double *J, double c1, double c2, double tau,
                int3 d1, int3 d2)
{
  // Electric-field update: finite differences of H1 (offset d1) and H2
  // (offset d2) minus the current density J scaled by tau.
  const int n  = c->getGlobalCellNumber(i.x, i.y, i.z);
  const int n1 = c->getGlobalCellNumber(i.x + d1.x, i.y + d1.y, i.z + d1.z);
  const int n2 = c->getGlobalCellNumber(i.x + d2.x, i.y + d2.y, i.z + d2.z);
  E[n] += c1 * (H1[n] - H1[n1]) - c2 * (H2[n] - H2[n2]) - tau * J[n];
}
__host__ __device__
void periodicElement(Cell *c, int i, int k, double *E, int dir, int to, int from)
{
  // Enforce a periodic boundary along direction dir by copying the field
  // value from the 'from' boundary layer into the 'to' layer.
  const int dst = c->getGlobalBoundaryCellNumber(i, k, dir, to);
  const int src = c->getGlobalBoundaryCellNumber(i, k, dir, from);
  E[dst] = E[src];
}
__global__ void GPU_periodic(GPUCell **cells,
                             int i_s, int k_s,
                             double *E, int dir, int to, int from)
{
  // The grid covers the boundary plane in (x,z); blockIdx.y is unused.
  Cell *c0 = cells[0];
  periodicElement(c0, blockIdx.x + i_s, blockIdx.z + k_s, E, dir, to, from);
}
// Fold the current deposited into the boundary layers back into the periodic
// domain along direction `dir` (N layers wide): layer N-1 is accumulated into
// layer 1, layer N-2 into layer 0, and the boundary layers are then
// overwritten with the folded values.  The (dir, dirE) comparisons suppress
// the accumulate/copy for one direction/component combination --
// NOTE(review): the exact convention depends on how callers number
// directions; confirm against GPU_CurrentPeriodic call sites.
__host__ __device__
void periodicCurrentElement(Cell *c,int i,int k,double *E,int dir, int dirE,int N)
{
int n1 = c->getGlobalBoundaryCellNumber(i,k,dir,1);
int n_Nm1 = c->getGlobalBoundaryCellNumber(i,k,dir,N-1);
if(dir != dirE)
{
E[n1] += E[n_Nm1];
}
if(dir != 1 || dirE != 1)
{
E[n_Nm1] = E[n1];
}
int n_Nm2 = c->getGlobalBoundaryCellNumber(i,k,dir,N-2);
int n0 = c->getGlobalBoundaryCellNumber(i,k,dir,0);
#ifdef PERIODIC_CURRENT_PRINTS
printf("%e %e \n",E[n0],E[n_Nm2]);
#endif
E[n0] += E[n_Nm2];
E[n_Nm2] = E[n0];
}
__global__ void GPU_CurrentPeriodic(GPUCell **cells, double *E, int dirE, int dir,
                                    int i_s, int k_s, int N)
{
  // The grid covers the boundary plane in (x,z); blockIdx.y is unused.
  Cell *c0 = cells[0];
  periodicCurrentElement(c0, blockIdx.x + i_s, blockIdx.z + k_s,
                         E, dir, dirE, N);
}
__global__ void GPU_eme(
    GPUCell **cells,
    int3 s,
    double *E, double *H1, double *H2,
    double *J, double c1, double c2, double tau,
    int3 d1, int3 d2)
{
  // Offset the starting cell s by the global 3D thread coordinate, then
  // apply the electric-field update for that cell.
  s.x += blockIdx.x * blockDim.x + threadIdx.x;
  s.y += blockIdx.y * blockDim.y + threadIdx.y;
  s.z += blockIdx.z * blockDim.z + threadIdx.z;
  emeElement(cells[0], s, E, H1, H2, J, c1, c2, tau, d1, d2);
}
__global__ void copy_pointers(Cell **cells, int *d_flags, double_pointer *d_pointers)
{
  // One block per cell: copy the per-cell diagnostic flag and attribute
  // pointer from the flat device arrays into the cell object itself.
  Cell *cell = cells[blockIdx.x];
  cell->flag_wrong_current_cell = d_flags[blockIdx.x];
  cell->d_wrong_current_particle_attributes = d_pointers[blockIdx.x];
}
|
b86b68ccd87a35e469bb5e372b0db9a1dbd29c3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2021 Benjamin Brock
//
// SPDX-License-Identifier: BSD-3-Clause
#include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <limits>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <bcl/containers/experimental/cuda/DArray.hpp>
// #include <hiprand/hiprand.h>
// #include <hiprand/hiprand_kernel.h>
// Irregular-lookup benchmark kernel.  Each thread block performs
// `num_lookups` remote reads: it draws a random destination rank and a
// random offset within that rank's shard of `array` from the pre-generated
// `rand_nums`, then copies `transfer_size` elements into its private slice
// of `buffer` with a warp-cooperative memcpy.
// NOTE(review): ltid, local_extent and warp_id are computed but never used.
template <typename T>
__global__ void kernel(size_t num_blocks, size_t transfer_size, size_t num_lookups,
BCL::cuda::device_vector<size_t>& rand_nums,
BCL::cuda::device_vector<T, BCL::cuda::bcl_allocator<T>>& buffer,
BCL::cuda::DArray<T>& array) {
size_t ltid = threadIdx.x;
size_t local_extent = blockDim.x;
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
size_t warp_id = tid / 32;
size_t block_id = blockIdx.x;
if (block_id >= num_blocks) {
return;
}
for (size_t i = 0; i < num_lookups; i++) {
// Two random numbers per block per lookup: one selects the rank, one the
// offset (kept in range so the transfer never runs past the shard end).
size_t dest_rank = rand_nums[block_id + i*num_blocks*2] % array.d_ptrs_.size();
size_t rand_loc = rand_nums[block_id+num_blocks + i*num_blocks*2] % (array.local_size() - transfer_size);
auto ptr = array.d_ptrs_[dest_rank].get() + rand_loc;
T* buf = buffer.data() + block_id * transfer_size;
BCL::cuda::memcpy_warp(buf, ptr, transfer_size*sizeof(T));
BCL::cuda::flush();
}
}
// Build a device vector of n pseudo-random values in [0, extent), seeded by
// the process rank so every rank draws a distinct, reproducible sequence.
template <typename T>
BCL::cuda::device_vector<T> random_device_vector(size_t n,
                            size_t extent = std::numeric_limits<size_t>::max()) {
  srand48(BCL::rank());
  std::vector<T> host(n);
  for (auto& value : host) {
    value = lrand48() % extent;
  }
  return BCL::cuda::device_vector<T>(host.begin(), host.end());
}
/*
Parameters are
1) Global data size
2) Transfer size
3) Number of lookups
a) Number of threads
b) Lookups per thread
*/
/*
 * Run the irregular-lookup bandwidth benchmark for one transfer size
 * (bytes): set up the random lookup table, a 2 GB distributed array and a
 * per-block staging buffer, launch `kernel` once, and report aggregate and
 * per-process bandwidth from the wall-clock duration.  T is fixed to int
 * (enforced by the static_assert below).
 */
template <typename T = int>
void run_kernel(size_t transfer_data_size) {
BCL::print("Running kernel...\n");
static_assert(std::is_same<T, int>::value);
// XXX: Important parameters
// 1) Global data size, in bytes.
size_t global_data_size = size_t(2)*1024*1024*size_t(1024);
// 3a) Number of threads to launch, per processor
size_t num_threads = 1000;
size_t threads_per_block = 32;
size_t num_blocks = (num_threads + threads_per_block - 1) / threads_per_block;
// 3b) Number of lookups to perform, per block
size_t num_lookups = 20;
BCL::print("Creating random device vector...\n");
// Two random draws (rank, offset) per block per lookup.
auto rand_nums = random_device_vector<size_t>(num_blocks*2*num_lookups);
size_t global_size = global_data_size / sizeof(T);
size_t transfer_size = transfer_data_size / sizeof(T);
assert(transfer_size > 0);
BCL::print("Creating DArray...\n");
BCL::cuda::DArray<T> array(global_size);
BCL::print("Created DArray...\n");
BCL::cuda::device_vector<T, BCL::cuda::bcl_allocator<T>> buffer(transfer_size * num_blocks);
BCL::print("Creating buffer of size %lu\n", transfer_size * num_blocks);
BCL::cuda::barrier();
auto begin = std::chrono::high_resolution_clock::now();
BCL::print("Beginning operation...\n");
hipLaunchKernelGGL(( kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0,
num_blocks, transfer_size, num_lookups, rand_nums, buffer, array);
hipDeviceSynchronize();
BCL::cuda::barrier();
auto end = std::chrono::high_resolution_clock::now();
double duration = std::chrono::duration<double>(end - begin).count();
// Every block moves transfer_data_size bytes num_lookups times on every
// process.
size_t transferred_data = transfer_data_size * num_blocks * num_lookups * BCL::nprocs();
double bw = 1.0e-9*(transferred_data / duration);
double bwpn = bw / BCL::nprocs();
BCL::print("Ended in %lf seconds.\n", duration);
BCL::print("Total BW %lf GB/s\n", bw);
BCL::print("%lf GB/s per process\n", bwpn);
}
int main(int argc, char** argv) {
// Initialise the BCL runtime and its CUDA backend.  NOTE(review): the
// arguments are presumably heap sizes in MB -- confirm against BCL docs.
BCL::init(64);
BCL::cuda::init(4096);
// The transfer size (in bytes) is a required command-line argument.
if (argc < 2) {
if (BCL::rank() == 0) {
fprintf(stderr, "usage: ./irregular-lookup [transfer_data_size (bytes)]\n");
}
BCL::cuda::finalize();
BCL::finalize();
return 1;
}
BCL::print("Beginning experiment...\n");
// The 256 KB default is overwritten unconditionally just below.
size_t transfer_data_size = size_t(256)*size_t(1024);
transfer_data_size = std::atoll(argv[1]);
run_kernel(transfer_data_size);
BCL::cuda::finalize();
BCL::finalize();
return 0;
}
| b86b68ccd87a35e469bb5e372b0db9a1dbd29c3d.cu | // SPDX-FileCopyrightText: 2021 Benjamin Brock
//
// SPDX-License-Identifier: BSD-3-Clause
#include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <limits>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <bcl/containers/experimental/cuda/DArray.hpp>
// #include <curand.h>
// #include <curand_kernel.h>
// Irregular-lookup benchmark kernel.  Each thread block performs
// `num_lookups` remote reads: it draws a random destination rank and a
// random offset within that rank's shard of `array` from the pre-generated
// `rand_nums`, then copies `transfer_size` elements into its private slice
// of `buffer` with a warp-cooperative memcpy.
// NOTE(review): ltid, local_extent and warp_id are computed but never used.
template <typename T>
__global__ void kernel(size_t num_blocks, size_t transfer_size, size_t num_lookups,
BCL::cuda::device_vector<size_t>& rand_nums,
BCL::cuda::device_vector<T, BCL::cuda::bcl_allocator<T>>& buffer,
BCL::cuda::DArray<T>& array) {
size_t ltid = threadIdx.x;
size_t local_extent = blockDim.x;
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
size_t warp_id = tid / 32;
size_t block_id = blockIdx.x;
if (block_id >= num_blocks) {
return;
}
for (size_t i = 0; i < num_lookups; i++) {
// Two random numbers per block per lookup: one selects the rank, one the
// offset (kept in range so the transfer never runs past the shard end).
size_t dest_rank = rand_nums[block_id + i*num_blocks*2] % array.d_ptrs_.size();
size_t rand_loc = rand_nums[block_id+num_blocks + i*num_blocks*2] % (array.local_size() - transfer_size);
auto ptr = array.d_ptrs_[dest_rank].get() + rand_loc;
T* buf = buffer.data() + block_id * transfer_size;
BCL::cuda::memcpy_warp(buf, ptr, transfer_size*sizeof(T));
BCL::cuda::flush();
}
}
// Build a device vector of n pseudo-random values in [0, extent), seeded by
// the process rank so every rank draws a distinct, reproducible sequence.
template <typename T>
BCL::cuda::device_vector<T> random_device_vector(size_t n,
                            size_t extent = std::numeric_limits<size_t>::max()) {
  srand48(BCL::rank());
  std::vector<T> host(n);
  for (auto& value : host) {
    value = lrand48() % extent;
  }
  return BCL::cuda::device_vector<T>(host.begin(), host.end());
}
/*
Parameters are
1) Global data size
2) Transfer size
3) Number of lookups
a) Number of threads
b) Lookups per thread
*/
/*
 * Run the irregular-lookup bandwidth benchmark for one transfer size
 * (bytes): set up the random lookup table, a 2 GB distributed array and a
 * per-block staging buffer, launch `kernel` once, and report aggregate and
 * per-process bandwidth from the wall-clock duration.  T is fixed to int
 * (enforced by the static_assert below).
 */
template <typename T = int>
void run_kernel(size_t transfer_data_size) {
BCL::print("Running kernel...\n");
static_assert(std::is_same<T, int>::value);
// XXX: Important parameters
// 1) Global data size, in bytes.
size_t global_data_size = size_t(2)*1024*1024*size_t(1024);
// 3a) Number of threads to launch, per processor
size_t num_threads = 1000;
size_t threads_per_block = 32;
size_t num_blocks = (num_threads + threads_per_block - 1) / threads_per_block;
// 3b) Number of lookups to perform, per block
size_t num_lookups = 20;
BCL::print("Creating random device vector...\n");
// Two random draws (rank, offset) per block per lookup.
auto rand_nums = random_device_vector<size_t>(num_blocks*2*num_lookups);
size_t global_size = global_data_size / sizeof(T);
size_t transfer_size = transfer_data_size / sizeof(T);
assert(transfer_size > 0);
BCL::print("Creating DArray...\n");
BCL::cuda::DArray<T> array(global_size);
BCL::print("Created DArray...\n");
BCL::cuda::device_vector<T, BCL::cuda::bcl_allocator<T>> buffer(transfer_size * num_blocks);
BCL::print("Creating buffer of size %lu\n", transfer_size * num_blocks);
BCL::cuda::barrier();
auto begin = std::chrono::high_resolution_clock::now();
BCL::print("Beginning operation...\n");
// NOTE(review): the launch result is not checked (no cudaGetLastError).
kernel<<<num_blocks, threads_per_block>>>
(num_blocks, transfer_size, num_lookups, rand_nums, buffer, array);
cudaDeviceSynchronize();
BCL::cuda::barrier();
auto end = std::chrono::high_resolution_clock::now();
double duration = std::chrono::duration<double>(end - begin).count();
// Every block moves transfer_data_size bytes num_lookups times on every
// process.
size_t transferred_data = transfer_data_size * num_blocks * num_lookups * BCL::nprocs();
double bw = 1.0e-9*(transferred_data / duration);
double bwpn = bw / BCL::nprocs();
BCL::print("Ended in %lf seconds.\n", duration);
BCL::print("Total BW %lf GB/s\n", bw);
BCL::print("%lf GB/s per process\n", bwpn);
}
int main(int argc, char** argv) {
// Initialise the BCL runtime and its CUDA backend.  NOTE(review): the
// arguments are presumably heap sizes in MB -- confirm against BCL docs.
BCL::init(64);
BCL::cuda::init(4096);
// The transfer size (in bytes) is a required command-line argument.
if (argc < 2) {
if (BCL::rank() == 0) {
fprintf(stderr, "usage: ./irregular-lookup [transfer_data_size (bytes)]\n");
}
BCL::cuda::finalize();
BCL::finalize();
return 1;
}
BCL::print("Beginning experiment...\n");
// The 256 KB default is overwritten unconditionally just below.
size_t transfer_data_size = size_t(256)*size_t(1024);
transfer_data_size = std::atoll(argv[1]);
run_kernel(transfer_data_size);
BCL::cuda::finalize();
BCL::finalize();
return 0;
}
|
b5e2d43dfe024d75b41f91ae2440d3eb47822c07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2015 Zhu.Jin Liang
#include <algorithm>
#include <map>
#include <set>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/common.hpp"
#include "caffe/zhu_face_layers.hpp"
#include "caffe/util/util_coords.hpp"
#include "caffe/util/util_pre_define.hpp"
namespace caffe {
// For each of the n (num, channel) maps in `input` (each `step` elements
// long), find the position of the maximum activation, map it with the
// affine transform (coefs[0]*row + coefs[1], coefs[2]*col + coefs[3]), and
// store the top-left corner of a crop_h x crop_w window centred on that
// point as a (row, col) pair in crop_beg.  Corners may fall outside the
// source map; the crop kernels clip later.
template <typename Dtype>
__global__ void find_max_kernel(const int n, const Dtype* input,
int* crop_beg, const Dtype* coefs,
const int step, const int width,
const int crop_h, const int crop_w) {
CUDA_KERNEL_LOOP(index, n) {
const int offset = index * step;
// Linear arg-max scan over this map's `step` elements.
Dtype max_val = input[offset];
int max_idx = 0;
for (int i = 1; i < step; ++i) {
if (input[offset + i] > max_val) {
max_val = input[offset + i];
max_idx = i;
}
}
// Centre the window: corner = transformed max position - half window.
crop_beg[index * 2] = ::floor((coefs[0] * (max_idx / width) + coefs[1])
- crop_h / Dtype(2.));
crop_beg[index * 2 + 1] = ::floor((coefs[2] * (max_idx % width) + coefs[3])
- crop_w / Dtype(2.));
}
}
// Copy a crop window out of `src` into `dst`; the window origin for each
// (num, channel) comes from `crop_beg` (pairs of row, col), one pair per
// bottom[1] channel.
template <typename Dtype>
__global__ void crop_patch_forward_kernel(const int nthreads,
const Dtype* src, Dtype* dst,
const int* crop_beg, const bool match_channel,
const int src_channels, const int src_height, const int src_width,
const int dst_channels, const int dst_height, const int dst_width,
const int bottom1_channels) {
CUDA_KERNEL_LOOP(index, nthreads) {
// `index` is a flat index into dst; recover its n, c, h, w, then derive
// the matching indices for src and for bottom1 (which supplies the crop
// origin).  All three share the same batch index n.
int dst_index = index;
const int dst_w = dst_index % dst_width;
dst_index /= dst_width;
const int dst_h = dst_index % dst_height;
dst_index /= dst_height;
const int dst_c = dst_index % dst_channels;
const int n = dst_index /= dst_channels;
// Channel relation between bottom1/dst/src:
//   dst_c = match_channel ? src_c : (bottom1_c * src_channels + src_c);
// (1) match_channel == false: bottom1/src channels follow from dst_c by
//     integer division and remainder;
// (2) match_channel == true: only the matching channel is cropped, so
//     bottom1_c == dst_c == src_c.
// bottom1's channel and crop-origin offset:
const int bottom1_c = match_channel ? dst_c : (dst_c / src_channels);
const int bottom1_offset = (n * bottom1_channels + bottom1_c) * 2;
// src coordinates; skip (leaving dst at its zero-filled value) when the
// crop window falls outside the source map.
const int src_c = match_channel ? dst_c : (dst_c % src_channels);
const int src_h = dst_h + crop_beg[bottom1_offset];
if (src_h < 0 || src_h >= src_height) continue;
const int src_w = dst_w + crop_beg[bottom1_offset + 1];
if (src_w < 0 || src_w >= src_width) continue;
const int src_index = (((n * src_channels) + src_c) * src_height + src_h) * src_width + src_w;
dst[index] = src[src_index];
}
}
// Forward pass: zero the outputs, locate each channel's maximum in
// bottom[1] (writing crop origins into crop_beg_), then copy one crop
// window per output blob from the corresponding bottom[i + 2].
template <typename Dtype>
void CropPatchFromMaxFeaturePositionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Zero-fill so out-of-range crop pixels stay 0.
for (int i = 0; i < top.size(); ++i) {
caffe::caffe_gpu_set(top[i]->count(), Dtype(0), top[i]->mutable_gpu_data());
}
// One arg-max per (num, channel) map of bottom[1].
const int step = bottom[1]->height() * bottom[1]->width();
const int count = bottom[1]->num() * bottom[1]->channels();
hipLaunchKernelGGL(( find_max_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[1]->gpu_data(),
crop_beg_.mutable_gpu_data(), coefs_.gpu_data(),
step, bottom[1]->width(),
crop_h_, crop_w_);
CUDA_POST_KERNEL_CHECK;
const int* is_match_channel = is_match_channel_.cpu_data();
for (int i = 0; i < top.size(); ++i) {
hipLaunchKernelGGL(( crop_patch_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(top[i]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[i]->count(),
bottom[i + 2]->gpu_data(), top[i]->mutable_gpu_data(),
crop_beg_.gpu_data(), (is_match_channel[i] == 1),
bottom[i + 2]->channels(), bottom[i + 2]->height(), bottom[i + 2]->width(),
top[i]->channels(), top[i]->height(), top[i]->width(),
bottom[1]->channels());
CUDA_POST_KERNEL_CHECK;
}
}
// Backward pass: scatter top gradients back into the cropped-from bottoms.
// The per-(num, channel) loops run on the host; each crop row that
// intersects the source map is accumulated with one caffe_gpu_axpy.
template <typename Dtype>
void CropPatchFromMaxFeaturePositionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < top.size(); ++i) {
if (propagate_down[i + 2]) {
caffe::caffe_gpu_set(bottom[i + 2]->count(), Dtype(0.), bottom[i + 2]->mutable_gpu_diff());
int crop_idx = 0;
const int* crop_beg = crop_beg_.cpu_data();
const int* is_match_channel = is_match_channel_.cpu_data();
for (int n = 0; n < bottom[1]->num(); ++n) {
for (int c = 0; c < bottom[1]->channels(); ++c, crop_idx += 2) {
// Clip the crop window against the source edges.
// NOTE(review): clipping uses bottom[0]->width() while writes target
// bottom[i + 2] -- confirm the widths are guaranteed to agree.
const int top_blank = crop_beg[crop_idx] < 0 ? -crop_beg[crop_idx] : 0;
const int left_blank = crop_beg[crop_idx + 1] < 0 ? -crop_beg[crop_idx + 1] : 0;
const int actual_crop_x_beg = crop_beg[crop_idx + 1] + left_blank;
const int actual_crop_x_end = MIN(bottom[0]->width(), crop_beg[crop_idx + 1] + crop_w_);
const int actual_crop_w = actual_crop_x_end - actual_crop_x_beg;
if (actual_crop_w <= 0) {
continue;
}
for (int bottom_c = 0; bottom_c < bottom[i + 2]->channels(); ++bottom_c) {
// When channels match, only bottom_c == c carries gradient.
if (is_match_channel[i] == 1) {
if (bottom_c != c) continue;
}
const int top_c = (is_match_channel[i] == 1) ? bottom_c : (c * bottom[i + 2]->channels() + bottom_c);
for (int top_h = top_blank, crop_y = crop_beg[crop_idx] + top_blank;
top_h < top[i]->height() && crop_y < bottom[i + 2]->height(); ++top_h, ++crop_y) {
caffe_gpu_axpy(actual_crop_w, Dtype(1),
top[i]->gpu_diff() + top[i]->offset(n, top_c, top_h, left_blank),
bottom[i + 2]->mutable_gpu_diff() + bottom[i + 2]->offset(n, bottom_c, crop_y, actual_crop_x_beg));
}
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropPatchFromMaxFeaturePositionLayer);
} // namespace caffe | b5e2d43dfe024d75b41f91ae2440d3eb47822c07.cu | // Copyright 2015 Zhu.Jin Liang
#include <algorithm>
#include <map>
#include <set>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/common.hpp"
#include "caffe/zhu_face_layers.hpp"
#include "caffe/util/util_coords.hpp"
#include "caffe/util/util_pre_define.hpp"
namespace caffe {
// For each of the n (num, channel) maps in `input` (each `step` elements
// long), find the position of the maximum activation, map it with the
// affine transform (coefs[0]*row + coefs[1], coefs[2]*col + coefs[3]), and
// store the top-left corner of a crop_h x crop_w window centred on that
// point as a (row, col) pair in crop_beg.  Corners may fall outside the
// source map; the crop kernels clip later.
template <typename Dtype>
__global__ void find_max_kernel(const int n, const Dtype* input,
int* crop_beg, const Dtype* coefs,
const int step, const int width,
const int crop_h, const int crop_w) {
CUDA_KERNEL_LOOP(index, n) {
const int offset = index * step;
// Linear arg-max scan over this map's `step` elements.
Dtype max_val = input[offset];
int max_idx = 0;
for (int i = 1; i < step; ++i) {
if (input[offset + i] > max_val) {
max_val = input[offset + i];
max_idx = i;
}
}
// Centre the window: corner = transformed max position - half window.
crop_beg[index * 2] = std::floor((coefs[0] * (max_idx / width) + coefs[1])
- crop_h / Dtype(2.));
crop_beg[index * 2 + 1] = std::floor((coefs[2] * (max_idx % width) + coefs[3])
- crop_w / Dtype(2.));
}
}
// Copy a crop window out of `src` into `dst`; the window origin for each
// (num, channel) comes from `crop_beg` (pairs of row, col), one pair per
// bottom[1] channel.
template <typename Dtype>
__global__ void crop_patch_forward_kernel(const int nthreads,
const Dtype* src, Dtype* dst,
const int* crop_beg, const bool match_channel,
const int src_channels, const int src_height, const int src_width,
const int dst_channels, const int dst_height, const int dst_width,
const int bottom1_channels) {
CUDA_KERNEL_LOOP(index, nthreads) {
// `index` is a flat index into dst; recover its n, c, h, w, then derive
// the matching indices for src and for bottom1 (which supplies the crop
// origin).  All three share the same batch index n.
int dst_index = index;
const int dst_w = dst_index % dst_width;
dst_index /= dst_width;
const int dst_h = dst_index % dst_height;
dst_index /= dst_height;
const int dst_c = dst_index % dst_channels;
const int n = dst_index /= dst_channels;
// Channel relation between bottom1/dst/src:
//   dst_c = match_channel ? src_c : (bottom1_c * src_channels + src_c);
// (1) match_channel == false: bottom1/src channels follow from dst_c by
//     integer division and remainder;
// (2) match_channel == true: only the matching channel is cropped, so
//     bottom1_c == dst_c == src_c.
// bottom1's channel and crop-origin offset:
const int bottom1_c = match_channel ? dst_c : (dst_c / src_channels);
const int bottom1_offset = (n * bottom1_channels + bottom1_c) * 2;
// src coordinates; skip (leaving dst at its zero-filled value) when the
// crop window falls outside the source map.
const int src_c = match_channel ? dst_c : (dst_c % src_channels);
const int src_h = dst_h + crop_beg[bottom1_offset];
if (src_h < 0 || src_h >= src_height) continue;
const int src_w = dst_w + crop_beg[bottom1_offset + 1];
if (src_w < 0 || src_w >= src_width) continue;
const int src_index = (((n * src_channels) + src_c) * src_height + src_h) * src_width + src_w;
dst[index] = src[src_index];
}
}
// Forward pass: zero the outputs, locate each channel's maximum in
// bottom[1] (writing crop origins into crop_beg_), then copy one crop
// window per output blob from the corresponding bottom[i + 2].
template <typename Dtype>
void CropPatchFromMaxFeaturePositionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Zero-fill so out-of-range crop pixels stay 0.
for (int i = 0; i < top.size(); ++i) {
caffe::caffe_gpu_set(top[i]->count(), Dtype(0), top[i]->mutable_gpu_data());
}
// One arg-max per (num, channel) map of bottom[1].
const int step = bottom[1]->height() * bottom[1]->width();
const int count = bottom[1]->num() * bottom[1]->channels();
find_max_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[1]->gpu_data(),
crop_beg_.mutable_gpu_data(), coefs_.gpu_data(),
step, bottom[1]->width(),
crop_h_, crop_w_);
CUDA_POST_KERNEL_CHECK;
const int* is_match_channel = is_match_channel_.cpu_data();
for (int i = 0; i < top.size(); ++i) {
crop_patch_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(top[i]->count()), CAFFE_CUDA_NUM_THREADS>>>(
top[i]->count(),
bottom[i + 2]->gpu_data(), top[i]->mutable_gpu_data(),
crop_beg_.gpu_data(), (is_match_channel[i] == 1),
bottom[i + 2]->channels(), bottom[i + 2]->height(), bottom[i + 2]->width(),
top[i]->channels(), top[i]->height(), top[i]->width(),
bottom[1]->channels());
CUDA_POST_KERNEL_CHECK;
}
}
// Backward pass: scatter top gradients back into the cropped-from bottoms.
// The per-(num, channel) loops run on the host; each crop row that
// intersects the source map is accumulated with one caffe_gpu_axpy.
template <typename Dtype>
void CropPatchFromMaxFeaturePositionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < top.size(); ++i) {
if (propagate_down[i + 2]) {
caffe::caffe_gpu_set(bottom[i + 2]->count(), Dtype(0.), bottom[i + 2]->mutable_gpu_diff());
int crop_idx = 0;
const int* crop_beg = crop_beg_.cpu_data();
const int* is_match_channel = is_match_channel_.cpu_data();
for (int n = 0; n < bottom[1]->num(); ++n) {
for (int c = 0; c < bottom[1]->channels(); ++c, crop_idx += 2) {
// Clip the crop window against the source edges.
// NOTE(review): clipping uses bottom[0]->width() while writes target
// bottom[i + 2] -- confirm the widths are guaranteed to agree.
const int top_blank = crop_beg[crop_idx] < 0 ? -crop_beg[crop_idx] : 0;
const int left_blank = crop_beg[crop_idx + 1] < 0 ? -crop_beg[crop_idx + 1] : 0;
const int actual_crop_x_beg = crop_beg[crop_idx + 1] + left_blank;
const int actual_crop_x_end = MIN(bottom[0]->width(), crop_beg[crop_idx + 1] + crop_w_);
const int actual_crop_w = actual_crop_x_end - actual_crop_x_beg;
if (actual_crop_w <= 0) {
continue;
}
for (int bottom_c = 0; bottom_c < bottom[i + 2]->channels(); ++bottom_c) {
// When channels match, only bottom_c == c carries gradient.
if (is_match_channel[i] == 1) {
if (bottom_c != c) continue;
}
const int top_c = (is_match_channel[i] == 1) ? bottom_c : (c * bottom[i + 2]->channels() + bottom_c);
for (int top_h = top_blank, crop_y = crop_beg[crop_idx] + top_blank;
top_h < top[i]->height() && crop_y < bottom[i + 2]->height(); ++top_h, ++crop_y) {
caffe_gpu_axpy(actual_crop_w, Dtype(1),
top[i]->gpu_diff() + top[i]->offset(n, top_c, top_h, left_blank),
bottom[i + 2]->mutable_gpu_diff() + bottom[i + 2]->offset(n, bottom_c, crop_y, actual_crop_x_beg));
}
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropPatchFromMaxFeaturePositionLayer);
} // namespace caffe |
1e98b7e38dc5eb147fd4ade3b528d3c6e05a13cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017 Rory mitchell
*/
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include <functional>
#include <future>
#include <numeric>
#include "common_hip.cuh"
#include "device_helpers_hip.cuh"
#include "dmlc/timer.h"
#include "gpu_hist_builder_hip.cuh"
namespace xgboost {
namespace tree {
// Upload one shard of the quantised gradient matrix to `device_idx`.
// Rows [row_begin, row_end) / elements [element_begin, element_end) of the
// host GHistIndexMatrix are bit-compressed on the host into the externally
// allocated gidx_buffer, and the row pointer is copied and rebased so the
// shard's first element starts at offset 0.
void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat,
bst_ulong element_begin, bst_ulong element_end,
bst_ulong row_begin, bst_ulong row_end, int n_bins) {
dh::safe_cuda(hipSetDevice(device_idx));
CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
<< "row_ptr must be externally allocated";
// Bit-compress the index range on the host, then copy the packed buffer
// to the device in one assignment.
common::CompressedBufferWriter cbw(n_bins);
std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
gmat.index.begin() + element_end);
gidx_buffer = host_buffer;
gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);
// row_ptr
thrust::copy(gmat.row_ptr.data() + row_begin,
gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
// normalise row_ptr: subtract the shard's first element offset so the
// local row pointer starts at zero.
size_t start = gmat.row_ptr[row_begin];
thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
[=] __device__(size_t val) { return val - start; });
}
void DeviceHist::Init(int n_bins_in) {
  // Record the per-node bin count; the backing buffer must already have
  // been allocated externally (via the bulk allocator).
  CHECK(!data.empty()) << "DeviceHist must be externally allocated";
  this->n_bins = n_bins_in;
}
// Zero the histogram buffer on the given device.
void DeviceHist::Reset(int device_idx) {
  // Check the device switch like every other call site in this file does:
  // a silently failed hipSetDevice would leave the fill targeting the
  // wrong (previously current) device.
  dh::safe_cuda(hipSetDevice(device_idx));
  data.fill(bst_gpair_precise());
}
bst_gpair_precise* DeviceHist::GetLevelPtr(int depth) {
  // Histograms are stored level by level: all nodes above this depth occupy
  // n_nodes(depth - 1) node slots of n_bins entries each.
  const int preceding_nodes = n_nodes(depth - 1);
  return data.data() + preceding_nodes * n_bins;
}
// Total histogram entries for one tree level: nodes at that level times bins.
int DeviceHist::LevelSize(int depth) { return n_nodes_level(depth) * n_bins; }
HistBuilder DeviceHist::GetBuilder() {
  // Hand out a lightweight, non-owning device-side view of the histogram.
  HistBuilder builder(data.data(), n_bins);
  return builder;
}
// Thin device-side view over DeviceHist's buffer: stores a raw pointer and
// the per-node bin count; does not own the memory.
HistBuilder::HistBuilder(bst_gpair_precise* ptr, int n_bins)
: d_hist(ptr), n_bins(n_bins) {}
// Define double precision atomic add for older architectures.
// Native atomicAdd(double*) exists only for compute capability >= 6.0; for
// older devices emulate it with a compare-and-swap loop on the bit pattern.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address; // NOLINT
unsigned long long int old = *address_as_ull, assumed; // NOLINT
do {
assumed = old;
// Reinterpret as 64-bit int, add in double space, and retry until no
// other thread modified the value in between.
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Atomically accumulate one gradient pair into bin `gidx` of node `nidx`.
// Histogram layout is node-major: node nidx owns n_bins consecutive entries.
__device__ void HistBuilder::Add(bst_gpair_precise gpair, int gidx, int nidx) const {
int hist_idx = nidx * n_bins + gidx;
atomicAdd(&(d_hist[hist_idx].grad), gpair.grad); // OPTMARK: This and below
// line lead to about 3X
// slowdown due to memory
// dependency and access
// pattern issues.
atomicAdd(&(d_hist[hist_idx].hess), gpair.hess);
}
__device__ bst_gpair_precise HistBuilder::Get(int gidx, int nidx) const {
  // Node-major layout: node nidx owns n_bins consecutive histogram entries.
  const int hist_idx = nidx * n_bins + gidx;
  return d_hist[hist_idx];
}
// Default-construct with lazy initialisation: all per-device state (NCCL
// communicators, streams, buffers) is created later, on first InitData().
GPUHistBuilder::GPUHistBuilder()
: initialised(false),
is_dense(false),
p_last_fmat_(nullptr),
prediction_cache_initialised(false) {}
// Tear down the per-device NCCL communicators and streams created during
// initialisation.  A no-op when initialisation never ran.
GPUHistBuilder::~GPUHistBuilder() {
if (initialised) {
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(comms[d_idx]);
dh::safe_cuda(hipSetDevice(dList[d_idx]));
dh::safe_cuda(hipStreamDestroy(*(streams[d_idx])));
}
// Also destroy the split-finding communicator groups, one group per
// possible reduced device count.
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
}
}
}
}
// Validate the tree-growing parameters and report available memory on every
// visible device.  Device data structures themselves are created later by
// InitData().
void GPUHistBuilder::Init(const TrainParam& param) {
CHECK(param.max_depth < 16) << "Tree depth too large.";
CHECK(param.max_depth != 0) << "Tree depth cannot be 0.";
CHECK(param.grow_policy != TrainParam::kLossGuide)
<< "Loss guided growth policy not supported. Use CPU algorithm.";
this->param = param;
CHECK(param.n_gpus != 0) << "Must have at least one device";
int n_devices_all = dh::n_devices_all(param.n_gpus);
for (int device_idx = 0; device_idx < n_devices_all; device_idx++) {
if (!param.silent) {
size_t free_memory = dh::available_memory(device_idx);
const int mb_size = 1048576;  // bytes per MB, for the log line below
LOG(CONSOLE) << "Device: [" << device_idx << "] "
<< dh::device_name(device_idx) << " with "
<< free_memory / mb_size << " MB available device memory.";
}
}
}
void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair,
DMatrix& fmat, // NOLINT
const RegTree& tree) {
// set member num_rows and n_devices for rest of GPUHistBuilder members
info = &fmat.info();
num_rows = info->num_row;
n_devices = dh::n_devices(param.n_gpus, num_rows);
if (!initialised) {
// set dList member
dList.resize(n_devices);
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
dList[d_idx] = device_idx;
}
// initialize nccl
comms.resize(n_devices);
streams.resize(n_devices);
dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
dList.data())); // initialize communicator
// (One communicator per
// process)
// printf("# NCCL: Using devices\n");
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
streams[d_idx] =
reinterpret_cast<hipStream_t*>(malloc(sizeof(hipStream_t)));
dh::safe_cuda(hipSetDevice(dList[d_idx]));
dh::safe_cuda(hipStreamCreate(streams[d_idx]));
int cudaDev;
int rank;
hipDeviceProp_t prop;
dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
dh::safe_cuda(hipGetDeviceProperties(&prop, cudaDev));
// printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
// prop.pciBusID, prop.name);
fflush(stdout);
}
// local find_split group of comms for each case of reduced number of GPUs
// to use
find_split_comms.resize(
n_devices,
std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but
// ok, and best to do
// here instead of
// repeatedly
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
dList.data())); // initialize communicator
// (One communicator per
// process)
}
CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column "
"block. Try setting 'tree_method' "
"parameter to 'exact'";
is_dense = info->num_nonzero == info->num_col * info->num_row;
hmat_.Init(&fmat, param.max_bin);
gmat_.cut = &hmat_;
gmat_.Init(&fmat);
int n_bins = hmat_.row_ptr.back();
int n_features = hmat_.row_ptr.size() - 1;
// deliniate data onto multiple gpus
device_row_segments.push_back(0);
device_element_segments.push_back(0);
bst_uint offset = 0;
bst_uint shard_size = ::ceil(static_cast<double>(num_rows) / n_devices);
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
offset += shard_size;
offset = ::min(offset, num_rows);
device_row_segments.push_back(offset);
device_element_segments.push_back(gmat_.row_ptr[offset]);
}
// Build feature segments
std::vector<int> h_feature_segments;
for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
for (int fidx = 0; fidx < n_features; fidx++) {
h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
}
}
h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
// Construct feature map
std::vector<int> h_gidx_feature_map(n_bins);
for (int fidx = 0; fidx < n_features; fidx++) {
for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
h_gidx_feature_map[i] = fidx;
}
}
int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
// allocate unique common data that reside on master device (NOTE: None
// currently)
// int master_device=dList[0];
// ba.allocate(master_device, );
// allocate vectors across all devices
temp_memory.resize(n_devices);
hist_vec.resize(n_devices);
nodes.resize(n_devices);
nodes_temp.resize(n_devices);
nodes_child_temp.resize(n_devices);
left_child_smallest.resize(n_devices);
left_child_smallest_temp.resize(n_devices);
feature_flags.resize(n_devices);
fidx_min_map.resize(n_devices);
feature_segments.resize(n_devices);
prediction_cache.resize(n_devices);
position.resize(n_devices);
position_tmp.resize(n_devices);
device_matrix.resize(n_devices);
device_gpair.resize(n_devices);
gidx_feature_map.resize(n_devices);
gidx_fvalue_map.resize(n_devices);
int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices)));
find_split_n_devices =
::min(n_nodes_level(param.max_depth), find_split_n_devices);
int max_num_nodes_device =
n_nodes_level(param.max_depth) / find_split_n_devices;
// num_rows_segment: for sharding rows onto gpus for splitting data
// num_elements_segment: for sharding rows (of elements) onto gpus for
// splitting data
// max_num_nodes_device: for sharding nodes onto gpus for split finding
// All other variables have full copy on gpu, with copy either being
// identical or just current portion (like for histogram) before AllReduce
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
bst_uint num_rows_segment =
device_row_segments[d_idx + 1] - device_row_segments[d_idx];
bst_ulong num_elements_segment =
device_element_segments[d_idx + 1] - device_element_segments[d_idx];
ba.allocate(
device_idx, &(hist_vec[d_idx].data),
n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
&nodes_child_temp[d_idx], max_num_nodes_device,
&left_child_smallest[d_idx], n_nodes(param.max_depth),
&left_child_smallest_temp[d_idx], max_num_nodes_device,
&feature_flags[d_idx],
n_features, // may change but same on all devices
&fidx_min_map[d_idx],
hmat_.min_val.size(), // constant and same on all devices
&feature_segments[d_idx],
h_feature_segments.size(), // constant and same on all devices
&prediction_cache[d_idx], num_rows_segment, &position[d_idx],
num_rows_segment, &position_tmp[d_idx], num_rows_segment,
&device_gpair[d_idx], num_rows_segment,
&device_matrix[d_idx].gidx_buffer,
common::CompressedBufferWriter::CalculateBufferSize(
num_elements_segment,
n_bins), // constant and same on all devices
&device_matrix[d_idx].row_ptr, num_rows_segment + 1,
&gidx_feature_map[d_idx], n_bins, // constant and same on all devices
&gidx_fvalue_map[d_idx],
hmat_.cut.size()); // constant and same on all devices
// Copy Host to Device (assumes comes after ba.allocate that sets device)
device_matrix[d_idx].Init(
device_idx, gmat_, device_element_segments[d_idx],
device_element_segments[d_idx + 1], device_row_segments[d_idx],
device_row_segments[d_idx + 1], n_bins);
gidx_feature_map[d_idx] = h_gidx_feature_map;
gidx_fvalue_map[d_idx] = hmat_.cut;
feature_segments[d_idx] = h_feature_segments;
fidx_min_map[d_idx] = hmat_.min_val;
// Initialize, no copy
hist_vec[d_idx].Init(n_bins); // init host object
prediction_cache[d_idx].fill(0); // init device object (assumes comes
// after ba.allocate that sets device)
feature_flags[d_idx].fill(1); // init device object (assumes comes after
// ba.allocate that sets device)
}
if (!param.silent) {
const int mb_size = 1048576;
LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB";
}
initialised = true;
}
// copy or init to do every iteration
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
nodes[d_idx].fill(Node());
nodes_temp[d_idx].fill(Node());
nodes_child_temp[d_idx].fill(Node());
position[d_idx].fill(0);
device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
gpair.begin() + device_row_segments[d_idx + 1]);
subsample_gpair(&device_gpair[d_idx], param.subsample,
device_row_segments[d_idx]);
hist_vec[d_idx].Reset(device_idx);
// left_child_smallest and left_child_smallest_temp don't need to be
// initialized
}
dh::synchronize_n_devices(n_devices, dList);
p_last_fmat_ = &fmat;
}
// Accumulate per-node gradient histograms for the current tree level and
// reduce them across all devices.
//
// Per device shard: every (element, row) pair is visited via a
// load-balancing transform; each element's gradient pair is atomically added
// into bin gidx of the node its row currently sits in. For depth > 0 only
// the smaller of two sibling nodes is accumulated directly; the larger
// sibling is reconstructed afterwards as parent - smaller (the "subtraction
// trick"). The level's histogram slice is then NCCL all-reduced in place so
// every device holds the global histogram.
void GPUHistBuilder::BuildHist(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    // Element/row ranges of this device's shard.
    size_t begin = device_element_segments[d_idx];
    size_t end = device_element_segments[d_idx + 1];
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    // Raw device pointers/iterators captured by value into the device lambda.
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
    auto d_position = position[d_idx].data();
    auto d_gpair = device_gpair[d_idx].data();
    auto d_left_child_smallest = left_child_smallest[d_idx].data();
    auto hist_builder = hist_vec[d_idx].GetBuilder();
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
        row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) {
          int nidx = d_position[local_ridx];  // OPTMARK: latency
          if (!is_active(nidx, depth)) return;

          // Only increment smallest node
          bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
                              is_left_child(nidx)) ||
                             (!d_left_child_smallest[parent_nidx(nidx)] &&
                              !is_left_child(nidx));
          if (!is_smallest && depth > 0) return;

          int gidx = d_gidx[local_idx];
          bst_gpair gpair = d_gpair[local_ridx];

          hist_builder.Add(gpair, gidx,
                           nidx);  // OPTMARK: This is slow, could use
                                   // shared memory or cache results
                                   // intead of writing to global
                                   // memory every time in atomic way.
        });
  }

  dh::synchronize_n_devices(n_devices, dList);
  //  time.printElapsed("Add Time");

  // (in-place) reduce each element of histogram (for only current level) across
  // multiple gpus
  // TODO(JCM): use out of place with pre-allocated buffer, but then have to
  // copy
  // back on device
  //  fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(hipSetDevice(device_idx));
    // In-place sum: send and receive buffers alias the level's histogram
    // slice; the gpair array is treated as a flat array of doubles for the
    // reduction (element count scaled accordingly).
    dh::safe_nccl(ncclAllReduce(
        reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair_precise) / sizeof(double),
        ncclDouble, ncclSum, comms[d_idx], *(streams[d_idx])));
  }

  // Wait for the asynchronous all-reduce on every device's stream.
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(hipSetDevice(device_idx));
    dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
  }
  // if no NCCL, then presume only 1 GPU, then already correct
  //  time.printElapsed("Reduce-Add Time");

  // Subtraction trick (applied to all devices in same way -- to avoid doing on
  // master and then Bcast)
  if (depth > 0) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      auto hist_builder = hist_vec[d_idx].GetBuilder();
      auto d_left_child_smallest = left_child_smallest[d_idx].data();
      int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;

      // One thread per (node-pair, bin): fill the larger sibling's bin as
      // parent bin minus the directly-accumulated smaller sibling's bin.
      dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
        int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
        bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
        if (left_smallest) {
          nidx++;  // If left is smallest switch to right child
        }
        int gidx = idx % hist_builder.n_bins;
        bst_gpair_precise parent = hist_builder.Get(gidx, parent_nidx(nidx));
        int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
        bst_gpair_precise other = hist_builder.Get(gidx, other_nidx);
        hist_builder.Add(parent - other, gidx,
                         nidx);  // OPTMARK: This is slow, could use shared
                                 // memory or cache results intead of writing to
                                 // global memory every time in atomic way.
      });
    }
    dh::synchronize_n_devices(n_devices, dList);
  }
}
// Find the best split for each node of the current tree level.
//
// Launch configuration: one thread block per node of the level (blockIdx.x
// indexes the node relative to nodes_offset_device), BLOCK_THREADS threads
// per block. For each candidate feature, the block (1) reduces the feature's
// histogram bins into a feature total, (2) exclusive-scans the bins to get
// the left-of-bin gradient sums, (3) evaluates the loss change per candidate
// bin, and (4) arg-max-reduces to the best bin. Thread 0 then records the
// winning split and initialises the two child nodes — either in place in
// d_nodes, or into the *_temp buffers when d_nodes_temp != NULL (results are
// then gathered across devices by the caller).
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
    const bst_gpair_precise* d_level_hist, int* d_feature_segments, int depth,
    int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp,
    Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map,
    float* d_gidx_fvalue_map, GPUTrainingParam gpu_param,
    bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) {
  // Block-wide primitives. Their temp storage can share one union because
  // the reduce/scan phases do not overlap.
  typedef hipcub::KeyValuePair<int, float> ArgMaxT;
  typedef hipcub::BlockScan<bst_gpair_precise, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
  typedef hipcub::BlockReduce<bst_gpair_precise, BLOCK_THREADS> SumReduceT;

  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };

  // cub::Uninitialized avoids default-constructing objects in shared memory.
  struct UninitializedSplit : cub::Uninitialized<Split> {};
  struct UninitializedGpair : cub::Uninitialized<bst_gpair_precise> {};

  __shared__ UninitializedSplit uninitialized_split;
  Split& split = uninitialized_split.Alias();
  __shared__ UninitializedGpair uninitialized_sum;
  bst_gpair_precise& shared_sum = uninitialized_sum.Alias();
  __shared__ ArgMaxT block_max;
  __shared__ TempStorage temp_storage;

  if (threadIdx.x == 0) {
    split = Split();
  }

  __syncthreads();

  // below two are for accessing full-sized node list stored on each device
  // always one block per node, BLOCK_THREADS threads per block
  int level_node_idx = blockIdx.x + nodes_offset_device;
  int node_idx = n_nodes(depth - 1) + level_node_idx;

  for (int fidx = 0; fidx < n_features; fidx++) {
    // Skip features dropped by column sampling for this level.
    if (colsample && d_feature_flags[fidx] == 0) continue;

    // Histogram range [begin, end) for this (node, feature) pair.
    int begin = d_feature_segments[level_node_idx * n_features + fidx];
    int end = d_feature_segments[level_node_idx * n_features + fidx + 1];

    // Tiled block reduce: total gradient sum over all bins of this feature.
    bst_gpair_precise feature_sum = bst_gpair_precise();
    for (int reduce_begin = begin; reduce_begin < end;
         reduce_begin += BLOCK_THREADS) {
      bool thread_active = reduce_begin + threadIdx.x < end;
      // Scan histogram
      bst_gpair_precise bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
                                    : bst_gpair_precise();

      feature_sum +=
          SumReduceT(temp_storage.sum_reduce).Reduce(bin, hipcub::Sum());
    }

    if (threadIdx.x == 0) {
      shared_sum = feature_sum;
    }
    // __syncthreads(); // no need to synch because below there is a Scan

    // Tiled exclusive scan; prefix_op carries the running prefix across
    // tiles so `bin` ends up holding the sum of all bins to its left.
    GpairCallbackOp prefix_op = GpairCallbackOp();
    for (int scan_begin = begin; scan_begin < end;
         scan_begin += BLOCK_THREADS) {
      bool thread_active = scan_begin + threadIdx.x < end;
      bst_gpair_precise bin =
          thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair_precise();

      BlockScanT(temp_storage.scan)
          .ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);

      // Calculate gain
      bst_gpair_precise parent_sum = d_nodes[node_idx].sum_gradients;
      float parent_gain = d_nodes[node_idx].root_gain;

      // Gradient mass of rows missing this feature: parent total minus what
      // is present in this feature's bins.
      bst_gpair_precise missing = parent_sum - shared_sum;

      bool missing_left;
      float gain = thread_active
                       ? loss_chg_missing(bin, missing, parent_sum, parent_gain,
                                          gpu_param, missing_left)
                       : -FLT_MAX;
      __syncthreads();

      // Find thread with best gain
      ArgMaxT tuple(threadIdx.x, gain);
      ArgMaxT best =
          MaxReduceT(temp_storage.max_reduce).Reduce(tuple, hipcub::ArgMax());

      if (threadIdx.x == 0) {
        block_max = best;
      }

      __syncthreads();

      // Best thread updates split
      if (threadIdx.x == block_max.key) {
        float fvalue;
        int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x;
        if (threadIdx.x == 0 &&
            begin == scan_begin) {  // check at start of first tile
          // First bin of the feature: split below the feature's minimum.
          fvalue = d_fidx_min_map[fidx];
        } else {
          fvalue = d_gidx_fvalue_map[gidx - 1];
        }

        bst_gpair_precise left = missing_left ? bin + missing : bin;
        bst_gpair_precise right = parent_sum - left;

        split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
      }
      __syncthreads();
    }  // end scan
  }  // end over features

  // Create node
  if (threadIdx.x == 0) {
    if (d_nodes_temp == NULL) {
      d_nodes[node_idx].split = split;
    } else {
      d_nodes_temp[blockIdx.x] = d_nodes[node_idx];  // first copy node values
      d_nodes_temp[blockIdx.x].split = split;        // now assign split
    }
    //    if (depth == 0) {
    //      split.Print();
    //    }

    Node *Nodeleft, *Noderight;
    bool* left_child_smallest;

    if (d_nodes_temp == NULL) {
      Nodeleft = &d_nodes[left_child_nidx(node_idx)];
      Noderight = &d_nodes[right_child_nidx(node_idx)];
      left_child_smallest =
          &d_left_child_smallest_temp[node_idx];  // NOTE: not per level, even
                                                  // though _temp variable name
    } else {
      Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0];
      Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1];
      left_child_smallest = &d_left_child_smallest_temp[blockIdx.x];
    }

    *Nodeleft =
        Node(split.left_sum,
             CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess),
             CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess));

    *Noderight =
        Node(split.right_sum,
             CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess),
             CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess));

    // Record smallest node
    if (split.left_sum.hess <= split.right_sum.hess) {
      *left_child_smallest = true;
    } else {
      *left_child_smallest = false;
    }
  }
}
// Block-size bounds for the split kernel template instantiation chain.
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due
// to CUDA compatibility 35 and above requirement
// for Maximum number of threads per block
#define MAX_BLOCK_THREADS 1024

// Entry point for split finding: dispatch to a kernel instantiation whose
// BLOCK_THREADS is large enough for param.max_bin, starting the search at
// the smallest block size.
void GPUHistBuilder::FindSplit(int depth) {
  // Specialised based on max_bins
  this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth);
}
// Recursion terminator of the specialisation chain: at the hard block-size
// ceiling, launch regardless of param.max_bin.
template <>
void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) {
  LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
}
// Walk up the compile-time instantiation chain (in CHUNK_BLOCK_THREADS
// steps) until the block is wide enough to cover param.max_bin bins, then
// launch the split-finding kernel with that block size. The chain is capped
// by the MAX_BLOCK_THREADS full specialisation above.
template <int BLOCK_THREADS>
void GPUHistBuilder::FindSplitSpecialize(int depth) {
  const bool block_wide_enough = param.max_bin <= BLOCK_THREADS;
  if (!block_wide_enough) {
    // Not wide enough yet: try the next larger instantiation.
    this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth);
    return;
  }
  LaunchFindSplit<BLOCK_THREADS>(depth);
}
// Launch find_split_kernel for the current level in one of three modes:
//  1. findsplit_shardongpus: shard the level's nodes over a power-of-two
//     subset of devices, NCCL AllGather the per-device results, then Bcast
//     to any devices left out of the split (currently disabled).
//  2. simuljob == 0: the master device computes all splits and Bcasts
//     nodes and left_child_smallest to the rest.
//  3. dosimuljob (default): every device redundantly computes every split,
//     so no inter-device communication is needed.
template <int BLOCK_THREADS>
void GPUHistBuilder::LaunchFindSplit(int depth) {
  bool colsample =
      param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;

  int dosimuljob = 1;
  int simuljob = 1;  // whether to do job on single GPU and broadcast (0) or to
                     // do same job on each GPU (1) (could make user parameter,
                     // but too fine-grained maybe)
  int findsplit_shardongpus = 0;  // too expensive generally, disable for now
  if (findsplit_shardongpus) {
    dosimuljob = 0;
    // use power of 2 for split finder because nodes are power of 2 (broadcast
    // result to remaining devices)
    int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices)));
    find_split_n_devices = ::min(n_nodes_level(depth), find_split_n_devices);
    int num_nodes_device = n_nodes_level(depth) / find_split_n_devices;
    int num_nodes_child_device =
        n_nodes_level(depth + 1) / find_split_n_devices;
    const int GRID_SIZE = num_nodes_device;

    // NOTE: No need to scatter before gather as all devices have same copy of
    // nodes, and within find_split_kernel() nodes_temp is given values from
    // nodes

    // for all nodes (split among devices) find best split per node
    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));

      int nodes_offset_device = d_idx * num_nodes_device;
      hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
          (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
          feature_segments[d_idx].data(), depth, (info->num_col),
          (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(),
          nodes_child_temp[d_idx].data(), nodes_offset_device,
          fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(),
          GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(),
          colsample, feature_flags[d_idx].data());
    }

    // nccl only on devices that did split
    dh::synchronize_n_devices(find_split_n_devices, dList);

    // Gather each device's node/child/smallest-flag results so every
    // participating device holds the whole level. Data is shipped as raw
    // bytes (ncclChar) since Node is a POD-style struct.
    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));

      dh::safe_nccl(ncclAllGather(
          reinterpret_cast<const void*>(nodes_temp[d_idx].data()),
          num_nodes_device * sizeof(Node) / sizeof(char), ncclChar,
          reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
          find_split_comms[find_split_n_devices - 1][d_idx],
          *(streams[d_idx])));

      if (depth !=
          param.max_depth) {  // don't copy over children nodes if no more nodes
        dh::safe_nccl(ncclAllGather(
            reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()),
            num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar,
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
            find_split_comms[find_split_n_devices - 1][d_idx],
            *(streams[d_idx])));  // Note offset by n_nodes(depth)
                                  // for recvbuff for child nodes
      }

      dh::safe_nccl(ncclAllGather(
          reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()),
          num_nodes_device * sizeof(bool) / sizeof(char), ncclChar,
          reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                  n_nodes(depth - 1)),
          find_split_comms[find_split_n_devices - 1][d_idx],
          *(streams[d_idx])));
    }

    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
    }

    if (n_devices > find_split_n_devices && n_devices > 1) {
      // if n_devices==1, no need to Bcast
      // if find_split_n_devices==1, this is just a copy operation, else it
      // copies
      // from master to all nodes in case extra devices not involved in split
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        dh::safe_cuda(hipSetDevice(device_idx));

        int master_device = dList[0];
        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
            n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));

        if (depth != param.max_depth) {  // don't copy over children nodes if no
                                         // more nodes
          dh::safe_nccl(ncclBcast(
              reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
              n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
              master_device, comms[d_idx], *(streams[d_idx])));
        }

        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                    n_nodes(depth - 1)),
            n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));
      }

      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        dh::safe_cuda(hipSetDevice(device_idx));
        dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
      }
    }
  } else if (simuljob == 0) {
    dosimuljob = 0;
    // Single-device mode: master computes all nodes of the level, results
    // are broadcast to the other devices below.
    int num_nodes_device = n_nodes_level(depth);
    const int GRID_SIZE = num_nodes_device;

    int d_idx = 0;
    int master_device = dList[d_idx];
    int device_idx = dList[d_idx];
    dh::safe_cuda(hipSetDevice(device_idx));

    int nodes_offset_device = d_idx * num_nodes_device;
    hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
        (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
        feature_segments[d_idx].data(), depth, (info->num_col),
        (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
        nodes_offset_device, fidx_min_map[d_idx].data(),
        gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
        left_child_smallest[d_idx].data(), colsample,
        feature_flags[d_idx].data());

    // broadcast result
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));

      dh::safe_nccl(ncclBcast(
          reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
          n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
          master_device, comms[d_idx], *(streams[d_idx])));

      if (depth !=
          param.max_depth) {  // don't copy over children nodes if no more nodes
        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
            n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));
      }

      dh::safe_nccl(
          ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                            n_nodes(depth - 1)),
                    n_nodes_level(depth) * sizeof(bool) / sizeof(char),
                    ncclChar, master_device, comms[d_idx], *(streams[d_idx])));
    }

    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));
      dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
    }
  } else {
    dosimuljob = 1;
  }

  if (dosimuljob) {  // if no NCCL or simuljob==1, do this
    int num_nodes_device = n_nodes_level(depth);
    const int GRID_SIZE = num_nodes_device;

    // all GPUs do same work
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(hipSetDevice(device_idx));

      int nodes_offset_device = 0;
      hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
          (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
          feature_segments[d_idx].data(), depth, (info->num_col),
          (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
          nodes_offset_device, fidx_min_map[d_idx].data(),
          gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
          left_child_smallest[d_idx].data(), colsample,
          feature_flags[d_idx].data());
    }
  }

  // NOTE: No need to syncrhonize with host as all above pure P2P ops or
  // on-device ops
}
// Compute the root node: each device reduces its own gradient shard with
// thrust (one OpenMP thread per device), partial sums are combined on the
// host, and the identical root node is then written onto every device.
void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) {
  // Perform asynchronous reduction on each gpu
  std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(hipSetDevice(device_idx));
    auto begin = device_gpair[d_idx].tbegin();
    auto end = device_gpair[d_idx].tend();
    bst_gpair init = bst_gpair();
    auto binary_op = thrust::plus<bst_gpair>();
    device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
  }

  // Combine the per-device partial sums on the host.
  bst_gpair sum = bst_gpair();
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    sum += device_sums[d_idx];
  }

  // Setup first node so all devices have same first node (here done same on all
  // devices, or could have done one device and Bcast if worried about exact
  // precision issues)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];

    auto d_nodes = nodes[d_idx].data();
    auto gpu_param = GPUTrainingParam(param);
    // Single-thread kernel: write node 0 with the global gradient sum.
    dh::launch_n(device_idx, 1, [=] __device__(int idx) {
      bst_gpair sum_gradients = sum;
      d_nodes[idx] =
          Node(sum_gradients,
               CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess),
               CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess));
    });
  }
  // synch all devices to host before moving on (No, can avoid because BuildHist
  // calls another kernel in default stream)
  // dh::synchronize_n_devices(n_devices, dList);
}
// Repartition rows to child nodes after the level's splits were chosen.
// Routes to the specialised kernel: the dense path can index a row's split
// feature directly, while the sparse path must walk the row's elements.
void GPUHistBuilder::UpdatePosition(int depth) {
  if (!is_dense) {
    this->UpdatePositionSparse(depth);
    return;
  }
  this->UpdatePositionDense(depth);
}
// Reassign each row to its left/right child node using the dense layout:
// a row's quantised value for feature findex sits at
// local_idx * n_columns + findex, so no per-row search is needed.
void GPUHistBuilder::UpdatePositionDense(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];

    auto d_position = position[d_idx].data();
    Node* d_nodes = nodes[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    int n_columns = info->num_col;
    size_t begin = device_row_segments[d_idx];
    size_t end = device_row_segments[d_idx + 1];

    // One thread per row of this device's shard.
    dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
      int pos = d_position[local_idx];
      if (!is_active(pos, depth)) {
        return;
      }
      Node node = d_nodes[pos];

      if (node.IsLeaf()) {
        return;
      }

      // Quantised value of the node's split feature for this row.
      int gidx = d_gidx[local_idx *
          static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)];

      float fvalue = d_gidx_fvalue_map[gidx];

      // Route the row to the child matching the split condition.
      if (fvalue <= node.split.fvalue) {
        d_position[local_idx] = left_child_nidx(pos);
      } else {
        d_position[local_idx] = right_child_nidx(pos);
      }
    });
  }
  dh::synchronize_n_devices(n_devices, dList);
  // dh::safe_cuda(hipDeviceSynchronize());
}
// Reassign rows to child nodes for sparse data in two phases: first every
// row is sent in its split's default (missing-value) direction; then rows
// that actually contain the split feature are re-routed by feature value.
// Writes go to position_tmp (while position is only read) to avoid a race
// between the two kernels, then position_tmp is copied back.
void GPUHistBuilder::UpdatePositionSparse(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];

    auto d_position = position[d_idx].data();
    auto d_position_tmp = position_tmp[d_idx].data();
    Node* d_nodes = nodes[d_idx].data();
    auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();

    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    size_t element_begin = device_element_segments[d_idx];
    size_t element_end = device_element_segments[d_idx + 1];

    // Update missing direction
    dh::launch_n(device_idx, row_end - row_begin,
                 [=] __device__(int local_idx) {
                   int pos = d_position[local_idx];
                   if (!is_active(pos, depth)) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   }
                   Node node = d_nodes[pos];

                   if (node.IsLeaf()) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   } else if (node.split.missing_left) {
                     // pos * 2 + 1 / + 2 are the left/right child indices.
                     d_position_tmp[local_idx] = pos * 2 + 1;
                   } else {
                     d_position_tmp[local_idx] = pos * 2 + 2;
                   }
                 });

    // Update node based on fvalue where exists
    // OPTMARK: This kernel is very inefficient for both compute and memory,
    // dominated by memory dependency / access patterns
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr,
        row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) {
          int pos = d_position[local_ridx];
          if (!is_active(pos, depth)) {
            return;
          }
          Node node = d_nodes[pos];

          if (node.IsLeaf()) {
            return;
          }

          int gidx = d_gidx[local_idx];
          int findex = d_gidx_feature_map[gidx];  // OPTMARK: slowest global
                                                  // memory access, maybe setup
                                                  // position, gidx, etc. as
                                                  // combined structure?

          // Only the element belonging to the split feature re-routes the row.
          if (findex == node.split.findex) {
            float fvalue = d_gidx_fvalue_map[gidx];

            if (fvalue <= node.split.fvalue) {
              d_position_tmp[local_ridx] = left_child_nidx(pos);
            } else {
              d_position_tmp[local_ridx] = right_child_nidx(pos);
            }
          }
        });
    position[d_idx] = position_tmp[d_idx];
  }
  dh::synchronize_n_devices(n_devices, dList);
}
// Draw the per-tree column (feature) subset.
void GPUHistBuilder::ColSampleTree() {
  // Nothing to do when no column subsampling is configured.
  const bool sampling_disabled =
      param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0;
  if (sampling_disabled) return;

  // Start from the identity feature list [0, num_col) and sample from it.
  feature_set_tree.resize(info->num_col);
  std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
  feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
}
// Draw the per-level column subset from the per-tree subset and publish it
// to every device as a dense 0/1 flag array.
void GPUHistBuilder::ColSampleLevel() {
  const bool sampling_disabled =
      param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0;
  if (sampling_disabled) return;

  // Sample this level's features out of the tree-level subset.
  feature_set_level.resize(feature_set_tree.size());
  feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);

  // Convert the sampled feature list into one flag per feature.
  std::vector<int> h_feature_flags(info->num_col, 0);
  for (auto fidx : feature_set_level) {
    h_feature_flags[fidx] = 1;
  }

  // Mirror the flags onto every device.
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    dh::safe_cuda(hipSetDevice(dList[d_idx]));
    feature_flags[d_idx] = h_feature_flags;
  }

  dh::synchronize_n_devices(n_devices, dList);
}
// Incrementally update cached predictions with this tree's leaf weights
// (scaled by the learning rate), avoiding a full re-prediction pass.
// Returns false when the cache cannot be used: no tree has been built yet
// or the matrix differs from the one last trained on.
bool GPUHistBuilder::UpdatePredictionCache(
    const DMatrix* data, std::vector<bst_float>* p_out_preds) {
  std::vector<bst_float>& out_preds = *p_out_preds;

  if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
    return false;
  }

  // First use: seed the device-side cache from the host predictions.
  if (!prediction_cache_initialised) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      size_t row_begin = device_row_segments[d_idx];
      size_t row_end = device_row_segments[d_idx + 1];

      prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
                                   out_preds.begin() + row_end);
    }
    prediction_cache_initialised = true;
  }
  dh::synchronize_n_devices(n_devices, dList);

  float eps = param.learning_rate;
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];

    auto d_nodes = nodes[d_idx].data();
    auto d_position = position[d_idx].data();
    auto d_prediction_cache = prediction_cache[d_idx].data();

    // Each row adds the weight of the node it ended up in after UpdatePosition.
    dh::launch_n(device_idx, prediction_cache[d_idx].size(),
                 [=] __device__(int local_idx) {
                   int pos = d_position[local_idx];
                   d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
                 });

    // Copy the updated shard back into the host prediction vector.
    thrust::copy(prediction_cache[d_idx].tbegin(),
                 prediction_cache[d_idx].tend(), &out_preds[row_begin]);
  }
  dh::synchronize_n_devices(n_devices, dList);

  return true;
}
// Grow one boosted tree for the given gradient pairs.
void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair,
                            DMatrix* p_fmat, RegTree* p_tree) {
  // Stage device data, compute the root node, and sample per-tree columns.
  this->InitData(gpair, *p_fmat, *p_tree);
  this->InitFirstNode(gpair);
  this->ColSampleTree();

  // Level-by-level growth: sample columns, build histograms, choose splits,
  // then repartition rows into the new children.
  for (int level = 0; level < param.max_depth; ++level) {
    this->ColSampleLevel();
    this->BuildHist(level);
    this->FindSplit(level);
    this->UpdatePosition(level);
  }

  // The master device holds the finished dense node array; convert it into
  // the sparse host tree representation.
  int master_device = dList[0];
  dh::safe_cuda(hipSetDevice(master_device));
  dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param);
}
} // namespace tree
} // namespace xgboost
| 1e98b7e38dc5eb147fd4ade3b528d3c6e05a13cf.cu | /*!
* Copyright 2017 Rory mitchell
*/
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <cub/cub.cuh>
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <future>
#include <numeric>
#include "common.cuh"
#include "device_helpers.cuh"
#include "dmlc/timer.h"
namespace xgboost {
namespace tree {
// Populate this device's shard of the quantised (gradient-index) matrix.
// Compresses gmat.index[element_begin, element_end) into gidx_buffer and
// copies/normalises the row pointers for rows [row_begin, row_end].
// Preconditions (CHECKed): gidx_buffer and row_ptr were allocated by the
// caller, with row_ptr holding (row_end - row_begin) + 1 entries.
void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat,
                      bst_ulong element_begin, bst_ulong element_end,
                      bst_ulong row_begin, bst_ulong row_end, int n_bins) {
  dh::safe_cuda(cudaSetDevice(device_idx));
  CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
  CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
      << "row_ptr must be externally allocated";

  // Bit-pack the gradient index values on the host, then copy to device.
  common::CompressedBufferWriter cbw(n_bins);
  std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
  cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
            gmat.index.begin() + element_end);
  gidx_buffer = host_buffer;
  gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);

  // row_ptr
  thrust::copy(gmat.row_ptr.data() + row_begin,
               gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
  // normalise row_ptr so the shard's first row starts at element offset 0
  size_t start = gmat.row_ptr[row_begin];
  thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
                    [=] __device__(size_t val) { return val - start; });
}
// Record the bin count; the histogram storage itself must already have been
// allocated externally (bulk allocator) before this is called.
void DeviceHist::Init(int n_bins_in) {
  this->n_bins = n_bins_in;
  CHECK(!data.empty()) << "DeviceHist must be externally allocated";
}
// Zero the histogram on the given device before accumulating a new level.
// Fix: wrap cudaSetDevice in dh::safe_cuda — the rest of this file checks
// every CUDA runtime call, but here the return status was silently dropped,
// so a failed device switch would corrupt a sibling device's histogram.
void DeviceHist::Reset(int device_idx) {
  dh::safe_cuda(cudaSetDevice(device_idx));
  data.fill(bst_gpair_precise());
}
// Pointer to the first histogram bin of the given tree level. Nodes are laid
// out breadth-first with n_bins bins per node, so the level starts right
// after the n_nodes(depth - 1) nodes of all shallower levels.
bst_gpair_precise* DeviceHist::GetLevelPtr(int depth) {
  return data.data() + n_nodes(depth - 1) * n_bins;
}
// Number of histogram entries occupied by one whole tree level.
int DeviceHist::LevelSize(int depth) { return n_bins * n_nodes_level(depth); }
// Lightweight non-owning device-side view for accumulating into this
// histogram from kernels.
HistBuilder DeviceHist::GetBuilder() {
  return HistBuilder(data.data(), n_bins);
}
// Wrap a raw histogram pointer; does not take ownership of the memory.
HistBuilder::HistBuilder(bst_gpair_precise* ptr, int n_bins)
    : d_hist(ptr), n_bins(n_bins) {}
// Define double precision atomic add for older architectures
// (native atomicAdd(double*) requires compute capability 6.0+).
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// Classic compare-and-swap loop: reinterpret the double as a 64-bit integer
// and retry until no other thread modified the value between read and CAS.
__device__ double atomicAdd(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;  // NOLINT
  unsigned long long int old = *address_as_ull, assumed;                      // NOLINT

  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);

  return __longlong_as_double(old);
}
#endif
// Atomically accumulate one gradient pair into bin gidx of node nidx.
__device__ void HistBuilder::Add(bst_gpair_precise gpair, int gidx, int nidx) const {
  int hist_idx = nidx * n_bins + gidx;
  atomicAdd(&(d_hist[hist_idx].grad), gpair.grad);  // OPTMARK: This and below
                                                    // line lead to about 3X
                                                    // slowdown due to memory
                                                    // dependency and access
                                                    // pattern issues.
  atomicAdd(&(d_hist[hist_idx].hess), gpair.hess);
}
// Read the accumulated gradient pair for bin gidx of node nidx.
__device__ bst_gpair_precise HistBuilder::Get(int gidx, int nidx) const {
  return d_hist[nidx * n_bins + gidx];
}
// Heavy setup is deferred to Init()/InitData(); these flags track which
// lazy initialisation steps have run so far.
GPUHistBuilder::GPUHistBuilder()
    : initialised(false),
      is_dense(false),
      p_last_fmat_(nullptr),
      prediction_cache_initialised(false) {}
// Tear down the per-device NCCL communicators and CUDA streams created in
// InitData(). Fixes:
//  * the cudaStream_t handles were malloc'ed in InitData() but never freed
//    — release them here to avoid a host memory leak;
//  * find_split_comms[num_d - 1] was initialised with only num_d
//    communicators (ncclCommInitAll(..., num_d, ...) in InitData), so only
//    those are destroyed; the remaining slots were never created.
GPUHistBuilder::~GPUHistBuilder() {
  if (initialised) {
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      ncclCommDestroy(comms[d_idx]);

      dh::safe_cuda(cudaSetDevice(dList[d_idx]));
      dh::safe_cuda(cudaStreamDestroy(*(streams[d_idx])));
      free(streams[d_idx]);  // handle was malloc'ed in InitData()
    }
    for (int num_d = 1; num_d <= n_devices;
         ++num_d) {  // loop over number of devices used
      for (int d_idx = 0; d_idx < num_d; ++d_idx) {
        ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
      }
    }
  }
}
// Validate the training parameters and log available memory per visible
// device. Actual data and memory setup is deferred to InitData().
void GPUHistBuilder::Init(const TrainParam& param) {
  CHECK(param.max_depth < 16) << "Tree depth too large.";
  CHECK(param.max_depth != 0) << "Tree depth cannot be 0.";
  CHECK(param.grow_policy != TrainParam::kLossGuide)
      << "Loss guided growth policy not supported. Use CPU algorithm.";
  this->param = param;

  CHECK(param.n_gpus != 0) << "Must have at least one device";
  int n_devices_all = dh::n_devices_all(param.n_gpus);
  for (int device_idx = 0; device_idx < n_devices_all; device_idx++) {
    if (!param.silent) {
      size_t free_memory = dh::available_memory(device_idx);
      const int mb_size = 1048576;  // bytes per MiB
      LOG(CONSOLE) << "Device: [" << device_idx << "] "
                   << dh::device_name(device_idx) << " with "
                   << free_memory / mb_size << " MB available device memory.";
    }
  }
}
// Per-iteration setup. On the first call it also performs the one-time
// initialisation: device selection, NCCL communicator/stream creation,
// quantile-sketch (hmat_/gmat_) construction, row sharding across devices,
// and a single bulk allocation of every per-device buffer. On every call it
// resets node/position state, copies this iteration's gradient pairs to each
// device shard and applies row subsampling.
void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair,
                              DMatrix& fmat,  // NOLINT
                              const RegTree& tree) {
  // set member num_rows and n_devices for rest of GPUHistBuilder members
  info = &fmat.info();
  num_rows = info->num_row;
  n_devices = dh::n_devices(param.n_gpus, num_rows);
  if (!initialised) {
    // set dList member: devices assigned round-robin starting at gpu_id
    dList.resize(n_devices);
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
      dList[d_idx] = device_idx;
    }
    // initialize nccl
    comms.resize(n_devices);
    streams.resize(n_devices);
    dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
                                  dList.data()));  // initialize communicator
                                                   // (One communicator per
                                                   // process)
    // printf("# NCCL: Using devices\n");
    for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
      // one dedicated stream per device, used for all NCCL collectives
      streams[d_idx] =
          reinterpret_cast<cudaStream_t*>(malloc(sizeof(cudaStream_t)));
      dh::safe_cuda(cudaSetDevice(dList[d_idx]));
      dh::safe_cuda(cudaStreamCreate(streams[d_idx]));
      int cudaDev;
      int rank;
      cudaDeviceProp prop;
      dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
      dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
      dh::safe_cuda(cudaGetDeviceProperties(&prop, cudaDev));
      // printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
      //        prop.pciBusID, prop.name);
      fflush(stdout);
    }
    // local find_split group of comms for each case of reduced number of GPUs
    // to use
    find_split_comms.resize(
        n_devices,
        std::vector<ncclComm_t>(n_devices));  // TODO(JCM): Excessive, but
                                              // ok, and best to do
                                              // here instead of
                                              // repeatedly
    // NOTE(review): row (num_d - 1) only has its first num_d entries
    // initialised below; the rest stay value-initialised.
    for (int num_d = 1; num_d <= n_devices;
         ++num_d) {  // loop over number of devices used
      dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
                                    dList.data()));  // initialize communicator
                                                     // (One communicator per
                                                     // process)
    }
    CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column "
                                    "block. Try setting 'tree_method' "
                                    "parameter to 'exact'";
    is_dense = info->num_nonzero == info->num_col * info->num_row;
    // build quantile cuts and compressed gradient-index matrix on the host
    hmat_.Init(&fmat, param.max_bin);
    gmat_.cut = &hmat_;
    gmat_.Init(&fmat);
    int n_bins = hmat_.row_ptr.back();
    int n_features = hmat_.row_ptr.size() - 1;
    // deliniate data onto multiple gpus: near-equal contiguous row shards
    device_row_segments.push_back(0);
    device_element_segments.push_back(0);
    bst_uint offset = 0;
    bst_uint shard_size = std::ceil(static_cast<double>(num_rows) / n_devices);
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      offset += shard_size;
      offset = std::min(offset, num_rows);
      device_row_segments.push_back(offset);
      device_element_segments.push_back(gmat_.row_ptr[offset]);
    }
    // Build feature segments: start bin of each (level-node, feature) pair
    std::vector<int> h_feature_segments;
    for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
      for (int fidx = 0; fidx < n_features; fidx++) {
        h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
      }
    }
    h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
    // Construct feature map: global bin index -> owning feature index
    std::vector<int> h_gidx_feature_map(n_bins);
    for (int fidx = 0; fidx < n_features; fidx++) {
      for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
        h_gidx_feature_map[i] = fidx;
      }
    }
    // NOTE(review): level_max_bins is computed but never used below.
    int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
    // allocate unique common data that reside on master device (NOTE: None
    // currently)
    // int master_device=dList[0];
    // ba.allocate(master_device, );
    // allocate vectors across all devices
    temp_memory.resize(n_devices);
    hist_vec.resize(n_devices);
    nodes.resize(n_devices);
    nodes_temp.resize(n_devices);
    nodes_child_temp.resize(n_devices);
    left_child_smallest.resize(n_devices);
    left_child_smallest_temp.resize(n_devices);
    feature_flags.resize(n_devices);
    fidx_min_map.resize(n_devices);
    feature_segments.resize(n_devices);
    prediction_cache.resize(n_devices);
    position.resize(n_devices);
    position_tmp.resize(n_devices);
    device_matrix.resize(n_devices);
    device_gpair.resize(n_devices);
    gidx_feature_map.resize(n_devices);
    gidx_fvalue_map.resize(n_devices);
    // largest power of two <= n_devices, capped at the last level's node count
    int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
    find_split_n_devices =
        std::min(n_nodes_level(param.max_depth), find_split_n_devices);
    int max_num_nodes_device =
        n_nodes_level(param.max_depth) / find_split_n_devices;
    // num_rows_segment: for sharding rows onto gpus for splitting data
    // num_elements_segment: for sharding rows (of elements) onto gpus for
    // splitting data
    // max_num_nodes_device: for sharding nodes onto gpus for split finding
    // All other variables have full copy on gpu, with copy either being
    // identical or just current portion (like for histogram) before AllReduce
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      bst_uint num_rows_segment =
          device_row_segments[d_idx + 1] - device_row_segments[d_idx];
      bst_ulong num_elements_segment =
          device_element_segments[d_idx + 1] - device_element_segments[d_idx];
      // single bulk allocation of every buffer this device needs
      ba.allocate(
          device_idx, &(hist_vec[d_idx].data),
          n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
          n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
          &nodes_child_temp[d_idx], max_num_nodes_device,
          &left_child_smallest[d_idx], n_nodes(param.max_depth),
          &left_child_smallest_temp[d_idx], max_num_nodes_device,
          &feature_flags[d_idx],
          n_features,  // may change but same on all devices
          &fidx_min_map[d_idx],
          hmat_.min_val.size(),  // constant and same on all devices
          &feature_segments[d_idx],
          h_feature_segments.size(),  // constant and same on all devices
          &prediction_cache[d_idx], num_rows_segment, &position[d_idx],
          num_rows_segment, &position_tmp[d_idx], num_rows_segment,
          &device_gpair[d_idx], num_rows_segment,
          &device_matrix[d_idx].gidx_buffer,
          common::CompressedBufferWriter::CalculateBufferSize(
              num_elements_segment,
              n_bins),  // constant and same on all devices
          &device_matrix[d_idx].row_ptr, num_rows_segment + 1,
          &gidx_feature_map[d_idx], n_bins,  // constant and same on all devices
          &gidx_fvalue_map[d_idx],
          hmat_.cut.size());  // constant and same on all devices
      // Copy Host to Device (assumes comes after ba.allocate that sets device)
      device_matrix[d_idx].Init(
          device_idx, gmat_, device_element_segments[d_idx],
          device_element_segments[d_idx + 1], device_row_segments[d_idx],
          device_row_segments[d_idx + 1], n_bins);
      gidx_feature_map[d_idx] = h_gidx_feature_map;
      gidx_fvalue_map[d_idx] = hmat_.cut;
      feature_segments[d_idx] = h_feature_segments;
      fidx_min_map[d_idx] = hmat_.min_val;
      // Initialize, no copy
      hist_vec[d_idx].Init(n_bins);     // init host object
      prediction_cache[d_idx].fill(0);  // init device object (assumes comes
                                        // after ba.allocate that sets device)
      feature_flags[d_idx].fill(1);  // init device object (assumes comes after
                                     // ba.allocate that sets device)
    }
    if (!param.silent) {
      const int mb_size = 1048576;
      LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB";
    }
    initialised = true;
  }
  // copy or init to do every iteration
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    nodes[d_idx].fill(Node());
    nodes_temp[d_idx].fill(Node());
    nodes_child_temp[d_idx].fill(Node());
    position[d_idx].fill(0);
    // copy this shard's gradient pairs and zero out subsampled rows
    device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
                             gpair.begin() + device_row_segments[d_idx + 1]);
    subsample_gpair(&device_gpair[d_idx], param.subsample,
                    device_row_segments[d_idx]);
    hist_vec[d_idx].Reset(device_idx);
    // left_child_smallest and left_child_smallest_temp don't need to be
    // initialized
  }
  dh::synchronize_n_devices(n_devices, dList);
  p_last_fmat_ = &fmat;
}
// Accumulate the gradient histogram of the current level on every device's
// row shard, AllReduce this level's bins across devices, then apply the
// subtraction trick: only the smaller child of each parent was accumulated,
// so the sibling is reconstructed as parent - smaller_child.
void GPUHistBuilder::BuildHist(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    size_t begin = device_element_segments[d_idx];
    size_t end = device_element_segments[d_idx + 1];
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
    auto d_position = position[d_idx].data();
    auto d_gpair = device_gpair[d_idx].data();
    auto d_left_child_smallest = left_child_smallest[d_idx].data();
    auto hist_builder = hist_vec[d_idx].GetBuilder();
    // one thread per nonzero element of the shard; local_ridx is the row
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
        row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) {
          int nidx = d_position[local_ridx];  // OPTMARK: latency
          if (!is_active(nidx, depth)) return;
          // Only increment smallest node
          bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
                              is_left_child(nidx)) ||
                             (!d_left_child_smallest[parent_nidx(nidx)] &&
                              !is_left_child(nidx));
          if (!is_smallest && depth > 0) return;
          int gidx = d_gidx[local_idx];
          bst_gpair gpair = d_gpair[local_ridx];
          hist_builder.Add(gpair, gidx,
                           nidx);  // OPTMARK: This is slow, could use
                                   // shared memory or cache results
                                   // intead of writing to global
                                   // memory every time in atomic way.
        });
  }
  dh::synchronize_n_devices(n_devices, dList);
  // time.printElapsed("Add Time");
  // (in-place) reduce each element of histogram (for only current level) across
  // multiple gpus
  // TODO(JCM): use out of place with pre-allocated buffer, but then have to
  // copy
  // back on device
  // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    // NOTE(review): the level histogram is reinterpreted as a flat array of
    // doubles for the sum -- assumes bst_gpair_precise holds only doubles;
    // verify against its definition.
    dh::safe_nccl(ncclAllReduce(
        reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
        hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair_precise) / sizeof(double),
        ncclDouble, ncclSum, comms[d_idx], *(streams[d_idx])));
  }
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
  }
  // if no NCCL, then presume only 1 GPU, then already correct
  // time.printElapsed("Reduce-Add Time");
  // Subtraction trick (applied to all devices in same way -- to avoid doing on
  // master and then Bcast)
  if (depth > 0) {
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      auto hist_builder = hist_vec[d_idx].GetBuilder();
      auto d_left_child_smallest = left_child_smallest[d_idx].data();
      int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;
      // one thread per bin of each non-accumulated (larger) child
      dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
        int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
        bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
        if (left_smallest) {
          nidx++;  // If left is smallest switch to right child
        }
        int gidx = idx % hist_builder.n_bins;
        bst_gpair_precise parent = hist_builder.Get(gidx, parent_nidx(nidx));
        int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
        bst_gpair_precise other = hist_builder.Get(gidx, other_nidx);
        hist_builder.Add(parent - other, gidx,
                         nidx);  // OPTMARK: This is slow, could use shared
                                 // memory or cache results intead of writing to
                                 // global memory every time in atomic way.
      });
    }
    dh::synchronize_n_devices(n_devices, dList);
  }
}
// Find the best split for one node of the current level.
// Launch: one block per level node (blockIdx.x + nodes_offset_device selects
// the node), BLOCK_THREADS threads cooperating over each feature's histogram
// slice. When d_nodes_temp == NULL, results are written directly into the
// full d_nodes array; otherwise per-block results go to the *_temp arrays for
// a later AllGather (sharded multi-GPU split finding).
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
    const bst_gpair_precise* d_level_hist, int* d_feature_segments, int depth,
    int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp,
    Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map,
    float* d_gidx_fvalue_map, GPUTrainingParam gpu_param,
    bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) {
  typedef cub::KeyValuePair<int, float> ArgMaxT;
  typedef cub::BlockScan<bst_gpair_precise, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
  typedef cub::BlockReduce<bst_gpair_precise, BLOCK_THREADS> SumReduceT;
  // The collectives never run concurrently, so their temporary storage can
  // share one union -- but every reuse/repurposing needs a __syncthreads().
  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };
  struct UninitializedSplit : cub::Uninitialized<Split> {};
  struct UninitializedGpair : cub::Uninitialized<bst_gpair_precise> {};
  __shared__ UninitializedSplit uninitialized_split;
  Split& split = uninitialized_split.Alias();
  __shared__ UninitializedGpair uninitialized_sum;
  bst_gpair_precise& shared_sum = uninitialized_sum.Alias();
  __shared__ ArgMaxT block_max;
  __shared__ TempStorage temp_storage;
  if (threadIdx.x == 0) {
    split = Split();
  }
  __syncthreads();
  // below two are for accessing full-sized node list stored on each device
  // always one block per node, BLOCK_THREADS threads per block
  int level_node_idx = blockIdx.x + nodes_offset_device;
  int node_idx = n_nodes(depth - 1) + level_node_idx;
  for (int fidx = 0; fidx < n_features; fidx++) {
    if (colsample && d_feature_flags[fidx] == 0) continue;
    int begin = d_feature_segments[level_node_idx * n_features + fidx];
    int end = d_feature_segments[level_node_idx * n_features + fidx + 1];
    // First pass: block-wide sum of this feature's bins (result valid in
    // thread 0 only, as per cub::BlockReduce).
    bst_gpair_precise feature_sum = bst_gpair_precise();
    for (int reduce_begin = begin; reduce_begin < end;
         reduce_begin += BLOCK_THREADS) {
      bool thread_active = reduce_begin + threadIdx.x < end;
      bst_gpair_precise bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
                               : bst_gpair_precise();
      feature_sum +=
          SumReduceT(temp_storage.sum_reduce).Reduce(bin, cub::Sum());
      // CUB requires a barrier before the temp storage is reused by the next
      // iteration (or repurposed by the scan below).
      __syncthreads();
    }
    if (threadIdx.x == 0) {
      shared_sum = feature_sum;
    }
    // Publish shared_sum to the whole block before any thread reads it in the
    // scan pass. (Previously this relied on the scan's internal barriers,
    // which CUB does not guarantee.)
    __syncthreads();
    // Second pass: exclusive prefix sum over the bins; prefix_op carries the
    // running total across tiles.
    GpairCallbackOp prefix_op = GpairCallbackOp();
    for (int scan_begin = begin; scan_begin < end;
         scan_begin += BLOCK_THREADS) {
      bool thread_active = scan_begin + threadIdx.x < end;
      bst_gpair_precise bin =
          thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair_precise();
      BlockScanT(temp_storage.scan)
          .ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
      // Calculate gain
      bst_gpair_precise parent_sum = d_nodes[node_idx].sum_gradients;
      float parent_gain = d_nodes[node_idx].root_gain;
      // gradient mass of rows with no entry for this feature
      bst_gpair_precise missing = parent_sum - shared_sum;
      bool missing_left;
      float gain = thread_active
                       ? loss_chg_missing(bin, missing, parent_sum, parent_gain,
                                          gpu_param, missing_left)
                       : -FLT_MAX;
      __syncthreads();
      // Find thread with best gain
      ArgMaxT tuple(threadIdx.x, gain);
      ArgMaxT best =
          MaxReduceT(temp_storage.max_reduce).Reduce(tuple, cub::ArgMax());
      if (threadIdx.x == 0) {
        block_max = best;
      }
      __syncthreads();
      // Best thread updates split
      if (threadIdx.x == block_max.key) {
        float fvalue;
        int gidx = (scan_begin - (level_node_idx * n_bins)) + threadIdx.x;
        if (threadIdx.x == 0 &&
            begin == scan_begin) {  // check at start of first tile
          // first bin: split below the feature's minimum observed value
          fvalue = d_fidx_min_map[fidx];
        } else {
          fvalue = d_gidx_fvalue_map[gidx - 1];
        }
        bst_gpair_precise left = missing_left ? bin + missing : bin;
        bst_gpair_precise right = parent_sum - left;
        split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
      }
      __syncthreads();
    }  // end scan
  }    // end over features
  // Create node
  if (threadIdx.x == 0) {
    if (d_nodes_temp == NULL) {
      d_nodes[node_idx].split = split;
    } else {
      d_nodes_temp[blockIdx.x] = d_nodes[node_idx];  // first copy node values
      d_nodes_temp[blockIdx.x].split = split;        // now assign split
    }
    // if (depth == 0) {
    //   split.Print();
    // }
    Node *Nodeleft, *Noderight;
    bool* left_child_smallest;
    if (d_nodes_temp == NULL) {
      Nodeleft = &d_nodes[left_child_nidx(node_idx)];
      Noderight = &d_nodes[right_child_nidx(node_idx)];
      left_child_smallest =
          &d_left_child_smallest_temp[node_idx];  // NOTE: not per level, even
                                                  // though _temp variable name
    } else {
      Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0];
      Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1];
      left_child_smallest = &d_left_child_smallest_temp[blockIdx.x];
    }
    *Nodeleft =
        Node(split.left_sum,
             CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess),
             CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess));
    *Noderight =
        Node(split.right_sum,
             CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess),
             CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess));
    // Record smallest node so BuildHist only accumulates the cheaper child
    if (split.left_sum.hess <= split.right_sum.hess) {
      *left_child_smallest = true;
    } else {
      *left_child_smallest = false;
    }
  }
}
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due
// to CUDA compatibility 35 and above requirement
// for Maximum number of threads per block
#define MAX_BLOCK_THREADS 1024

// Public split-finding entry point. Kicks off the compile-time search in
// FindSplitSpecialize, which picks the smallest block size (in steps of
// CHUNK_BLOCK_THREADS) able to cover param.max_bin.
void GPUHistBuilder::FindSplit(int depth) {
  this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth);
}
// Terminal specialisation of the compile-time block-size search: 1024 is the
// CUDA per-block thread limit, so launch with it regardless of max_bin.
template <>
void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) {
  LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
}
// Compile-time search for the smallest BLOCK_THREADS (growing in
// CHUNK_BLOCK_THREADS steps) with at least one thread per histogram bin.
// Recursion terminates at the MAX_BLOCK_THREADS specialisation.
template <int BLOCK_THREADS>
void GPUHistBuilder::FindSplitSpecialize(int depth) {
  if (param.max_bin <= BLOCK_THREADS) {
    LaunchFindSplit<BLOCK_THREADS>(depth);
  } else {
    this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth);
  }
}
// Launch find_split_kernel for every node of the current level, in one of
// three modes:
//   1. findsplit_shardongpus: shard the level's nodes across a power-of-two
//      subset of devices, then AllGather (and Bcast to any leftover devices);
//      disabled by default as too expensive.
//   2. simuljob == 0: one device does all nodes, result Bcast to the rest.
//   3. default (dosimuljob): every device redundantly computes all nodes, so
//      no communication is needed.
template <int BLOCK_THREADS>
void GPUHistBuilder::LaunchFindSplit(int depth) {
  bool colsample =
      param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;
  int dosimuljob = 1;
  int simuljob = 1;  // whether to do job on single GPU and broadcast (0) or to
                     // do same job on each GPU (1) (could make user parameter,
                     // but too fine-grained maybe)
  int findsplit_shardongpus = 0;  // too expensive generally, disable for now
  if (findsplit_shardongpus) {
    dosimuljob = 0;
    // use power of 2 for split finder because nodes are power of 2 (broadcast
    // result to remaining devices)
    int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
    find_split_n_devices = std::min(n_nodes_level(depth), find_split_n_devices);
    int num_nodes_device = n_nodes_level(depth) / find_split_n_devices;
    int num_nodes_child_device =
        n_nodes_level(depth + 1) / find_split_n_devices;
    const int GRID_SIZE = num_nodes_device;
    // NOTE: No need to scatter before gather as all devices have same copy of
    // nodes, and within find_split_kernel() nodes_temp is given values from
    // nodes
    // for all nodes (split among devices) find best split per node
    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      int nodes_offset_device = d_idx * num_nodes_device;
      find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
          (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
          feature_segments[d_idx].data(), depth, (info->num_col),
          (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(),
          nodes_child_temp[d_idx].data(), nodes_offset_device,
          fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(),
          GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(),
          colsample, feature_flags[d_idx].data());
    }
    // nccl only on devices that did split
    dh::synchronize_n_devices(find_split_n_devices, dList);
    // gather the per-device node shards into the full level-node range on
    // every participating device
    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      dh::safe_nccl(ncclAllGather(
          reinterpret_cast<const void*>(nodes_temp[d_idx].data()),
          num_nodes_device * sizeof(Node) / sizeof(char), ncclChar,
          reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
          find_split_comms[find_split_n_devices - 1][d_idx],
          *(streams[d_idx])));
      if (depth !=
          param.max_depth) {  // don't copy over children nodes if no more nodes
        dh::safe_nccl(ncclAllGather(
            reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()),
            num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar,
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
            find_split_comms[find_split_n_devices - 1][d_idx],
            *(streams[d_idx])));  // Note offset by n_nodes(depth)
                                  // for recvbuff for child nodes
      }
      dh::safe_nccl(ncclAllGather(
          reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()),
          num_nodes_device * sizeof(bool) / sizeof(char), ncclChar,
          reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                  n_nodes(depth - 1)),
          find_split_comms[find_split_n_devices - 1][d_idx],
          *(streams[d_idx])));
    }
    for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
    }
    if (n_devices > find_split_n_devices && n_devices > 1) {
      // if n_devices==1, no need to Bcast
      // if find_split_n_devices==1, this is just a copy operation, else it
      // copies
      // from master to all nodes in case extra devices not involved in split
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        dh::safe_cuda(cudaSetDevice(device_idx));
        int master_device = dList[0];
        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
            n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));
        if (depth != param.max_depth) {  // don't copy over children nodes if no
                                         // more nodes
          dh::safe_nccl(ncclBcast(
              reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
              n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
              master_device, comms[d_idx], *(streams[d_idx])));
        }
        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                    n_nodes(depth - 1)),
            n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));
      }
      for (int d_idx = 0; d_idx < n_devices; d_idx++) {
        int device_idx = dList[d_idx];
        dh::safe_cuda(cudaSetDevice(device_idx));
        dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
      }
    }
  } else if (simuljob == 0) {
    // single device computes all nodes of the level, then broadcasts
    dosimuljob = 0;
    int num_nodes_device = n_nodes_level(depth);
    const int GRID_SIZE = num_nodes_device;
    int d_idx = 0;
    int master_device = dList[d_idx];
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    int nodes_offset_device = d_idx * num_nodes_device;
    find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
        (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
        feature_segments[d_idx].data(), depth, (info->num_col),
        (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
        nodes_offset_device, fidx_min_map[d_idx].data(),
        gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
        left_child_smallest[d_idx].data(), colsample,
        feature_flags[d_idx].data());
    // broadcast result
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      dh::safe_nccl(ncclBcast(
          reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
          n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
          master_device, comms[d_idx], *(streams[d_idx])));
      if (depth !=
          param.max_depth) {  // don't copy over children nodes if no more nodes
        dh::safe_nccl(ncclBcast(
            reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
            n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
            master_device, comms[d_idx], *(streams[d_idx])));
      }
      dh::safe_nccl(
          ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
                                            n_nodes(depth - 1)),
                    n_nodes_level(depth) * sizeof(bool) / sizeof(char),
                    ncclChar, master_device, comms[d_idx], *(streams[d_idx])));
    }
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
    }
  } else {
    dosimuljob = 1;
  }
  if (dosimuljob) {  // if no NCCL or simuljob==1, do this
    int num_nodes_device = n_nodes_level(depth);
    const int GRID_SIZE = num_nodes_device;
    // all GPUs do same work
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      dh::safe_cuda(cudaSetDevice(device_idx));
      int nodes_offset_device = 0;
      find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
          (const bst_gpair_precise*)(hist_vec[d_idx].GetLevelPtr(depth)),
          feature_segments[d_idx].data(), depth, (info->num_col),
          (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
          nodes_offset_device, fidx_min_map[d_idx].data(),
          gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
          left_child_smallest[d_idx].data(), colsample,
          feature_flags[d_idx].data());
    }
  }
  // NOTE: No need to synchronize with host as all above pure P2P ops or
  // on-device ops
}
// Compute the root node: each device reduces its own gradient-pair shard
// (launched in parallel via OpenMP), the per-device partial sums are combined
// on the host, and every device then writes the identical root node -- so no
// broadcast is required.
void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) {
  // Perform asynchronous reduction on each gpu
  std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    dh::safe_cuda(cudaSetDevice(device_idx));
    auto begin = device_gpair[d_idx].tbegin();
    auto end = device_gpair[d_idx].tend();
    bst_gpair init = bst_gpair();
    auto binary_op = thrust::plus<bst_gpair>();
    // thrust::reduce blocks until this shard's sum is available
    device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
  }
  // combine the shard sums on the host
  bst_gpair sum = bst_gpair();
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    sum += device_sums[d_idx];
  }
  // Setup first node so all devices have same first node (here done same on all
  // devices, or could have done one device and Bcast if worried about exact
  // precision issues)
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_nodes = nodes[d_idx].data();
    auto gpu_param = GPUTrainingParam(param);
    // single-thread kernel: writes node 0 with the global gradient sum
    dh::launch_n(device_idx, 1, [=] __device__(int idx) {
      bst_gpair sum_gradients = sum;
      d_nodes[idx] =
          Node(sum_gradients,
               CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess),
               CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess));
    });
  }
  // synch all devices to host before moving on (No, can avoid because BuildHist
  // calls another kernel in default stream)
  // dh::synchronize_n_devices(n_devices, dList);
}
// Route rows to their child nodes after this level's splits. Sparse data
// needs a separate missing-direction pass, so dispatch on matrix density.
void GPUHistBuilder::UpdatePosition(int depth) {
  if (!is_dense) {
    this->UpdatePositionSparse(depth);
  } else {
    this->UpdatePositionDense(depth);
  }
}
// Move each active row of every shard to its left or right child. With a
// fully dense matrix each row stores exactly one bin per feature, so the
// split feature's bin lives at row * n_columns + split.findex.
void GPUHistBuilder::UpdatePositionDense(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_position = position[d_idx].data();
    Node* d_nodes = nodes[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    int n_columns = info->num_col;
    size_t begin = device_row_segments[d_idx];
    size_t end = device_row_segments[d_idx + 1];
    // one thread per row of this device's shard
    dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
      int pos = d_position[local_idx];
      if (!is_active(pos, depth)) {
        return;
      }
      Node node = d_nodes[pos];
      if (node.IsLeaf()) {
        return;
      }
      int gidx = d_gidx[local_idx *
                        static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)];
      float fvalue = d_gidx_fvalue_map[gidx];
      // ties on the split value go to the left child
      if (fvalue <= node.split.fvalue) {
        d_position[local_idx] = left_child_nidx(pos);
      } else {
        d_position[local_idx] = right_child_nidx(pos);
      }
    });
  }
  dh::synchronize_n_devices(n_devices, dList);
  // dh::safe_cuda(cudaDeviceSynchronize());
}
// Two-pass position update for sparse data: first send every active row in
// its node's missing direction (a row may have no entry for the split
// feature), then overwrite that default for rows that do contain the split
// feature using the actual feature value.
void GPUHistBuilder::UpdatePositionSparse(int depth) {
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    auto d_position = position[d_idx].data();
    auto d_position_tmp = position_tmp[d_idx].data();
    Node* d_nodes = nodes[d_idx].data();
    auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
    auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
    auto d_gidx = device_matrix[d_idx].gidx;
    auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    size_t element_begin = device_element_segments[d_idx];
    size_t element_end = device_element_segments[d_idx + 1];
    // Update missing direction: default every row into position_tmp
    dh::launch_n(device_idx, row_end - row_begin,
                 [=] __device__(int local_idx) {
                   int pos = d_position[local_idx];
                   if (!is_active(pos, depth)) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   }
                   Node node = d_nodes[pos];
                   if (node.IsLeaf()) {
                     d_position_tmp[local_idx] = pos;
                     return;
                   } else if (node.split.missing_left) {
                     d_position_tmp[local_idx] = pos * 2 + 1;  // left child
                   } else {
                     d_position_tmp[local_idx] = pos * 2 + 2;  // right child
                   }
                 });
    // Update node based on fvalue where exists
    // OPTMARK: This kernel is very inefficient for both compute and memory,
    // dominated by memory dependency / access patterns
    dh::TransformLbs(
        device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr,
        row_end - row_begin, is_dense, [=] __device__(size_t local_idx, int local_ridx) {
          int pos = d_position[local_ridx];
          if (!is_active(pos, depth)) {
            return;
          }
          Node node = d_nodes[pos];
          if (node.IsLeaf()) {
            return;
          }
          int gidx = d_gidx[local_idx];
          int findex = d_gidx_feature_map[gidx];  // OPTMARK: slowest global
                                                  // memory access, maybe setup
                                                  // position, gidx, etc. as
                                                  // combined structure?
          if (findex == node.split.findex) {
            float fvalue = d_gidx_fvalue_map[gidx];
            // ties on the split value go to the left child
            if (fvalue <= node.split.fvalue) {
              d_position_tmp[local_ridx] = left_child_nidx(pos);
            } else {
              d_position_tmp[local_ridx] = right_child_nidx(pos);
            }
          }
        });
    // publish the new positions for this shard
    position[d_idx] = position_tmp[d_idx];
  }
  dh::synchronize_n_devices(n_devices, dList);
}
// Draw the per-tree feature subsample from the full feature set. No-op when
// neither colsample parameter is active.
void GPUHistBuilder::ColSampleTree() {
  const bool sampling_disabled =
      param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0;
  if (sampling_disabled) return;
  feature_set_tree.resize(info->num_col);
  std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
  feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
}
// Draw the per-level feature subsample from the per-tree set and push the
// resulting 0/1 feature-flag mask to every device. No-op when neither
// colsample parameter is active.
void GPUHistBuilder::ColSampleLevel() {
  const bool sampling_disabled =
      param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0;
  if (sampling_disabled) return;
  feature_set_level.resize(feature_set_tree.size());
  feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);
  // mark selected features with 1, all others with 0
  std::vector<int> flags(info->num_col, 0);
  for (auto fidx : feature_set_level) flags[fidx] = 1;
  for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
    dh::safe_cuda(cudaSetDevice(dList[d_idx]));
    feature_flags[d_idx] = flags;
  }
  dh::synchronize_n_devices(n_devices, dList);
}
// Incrementally update cached predictions: add this tree's leaf weight
// (scaled by the learning rate) to each row's cached prediction and copy the
// cache back into out_preds. Returns false -- forcing the caller to do a full
// re-predict -- when the matrix differs from the one just trained on or no
// tree has been built yet.
bool GPUHistBuilder::UpdatePredictionCache(
    const DMatrix* data, std::vector<bst_float>* p_out_preds) {
  std::vector<bst_float>& out_preds = *p_out_preds;
  if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
    return false;
  }
  if (!prediction_cache_initialised) {
    // seed the device cache from the current predictions, shard by shard
    for (int d_idx = 0; d_idx < n_devices; d_idx++) {
      int device_idx = dList[d_idx];
      size_t row_begin = device_row_segments[d_idx];
      size_t row_end = device_row_segments[d_idx + 1];
      prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
                                   out_preds.begin() + row_end);
    }
    prediction_cache_initialised = true;
  }
  dh::synchronize_n_devices(n_devices, dList);
  float eps = param.learning_rate;
  for (int d_idx = 0; d_idx < n_devices; d_idx++) {
    int device_idx = dList[d_idx];
    size_t row_begin = device_row_segments[d_idx];
    size_t row_end = device_row_segments[d_idx + 1];
    auto d_nodes = nodes[d_idx].data();
    auto d_position = position[d_idx].data();
    auto d_prediction_cache = prediction_cache[d_idx].data();
    // d_position holds each row's final node after the last UpdatePosition
    dh::launch_n(device_idx, prediction_cache[d_idx].size(),
                 [=] __device__(int local_idx) {
                   int pos = d_position[local_idx];
                   d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
                 });
    // NOTE(review): &out_preds[row_begin] is UB if this shard is empty and
    // row_begin == out_preds.size() -- confirm shards are always non-empty.
    thrust::copy(prediction_cache[d_idx].tbegin(),
                 prediction_cache[d_idx].tend(), &out_preds[row_begin]);
  }
  dh::synchronize_n_devices(n_devices, dList);
  return true;
}
// Grow one tree level by level: build histograms, find splits and route rows
// for each level, then copy the finished dense node array from the master
// device back into the sparse RegTree representation on the host.
void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair,
                            DMatrix* p_fmat, RegTree* p_tree) {
  this->InitData(gpair, *p_fmat, *p_tree);
  this->InitFirstNode(gpair);
  this->ColSampleTree();
  for (int level = 0; level < param.max_depth; ++level) {
    this->ColSampleLevel();
    this->BuildHist(level);
    this->FindSplit(level);
    this->UpdatePosition(level);
  }
  // done with multi-GPU, pass back result from master to tree on host
  int master_device = dList[0];
  dh::safe_cuda(cudaSetDevice(master_device));
  dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param);
}
} // namespace tree
} // namespace xgboost
|
1912b6c388ce49152d0265aa3d6ed42c77614e78.hip | // !!! This is a file automatically generated by hipify!!!
/* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "LBM.h"
#include <hip/device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, hipfftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, hipfftDoubleComplex*);
__global__ void odd_extract(double*, hipfftDoubleComplex*);
__global__ void gpu_bc(double*);
// Flat index into the shared-memory tile, whose rows are (nThreads + 2*RAD)
// wide to hold the halo cells on both sides: row y, column x.
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
	const size_t tile_width = nThreads + 2 * RAD;
	return y * tile_width + x;
}
/* Iterative Poisson solver for the electric potential.
 * Repeatedly launches gpu_poisson sweeps until the maximum pointwise
 * residual drops below TOLERANCE, copying the residual field back to the
 * host after every sweep to test convergence.
 *   charge_gpu - device array, charge density (source term)
 *   phi_gpu    - device array, potential; updated in place
 */
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
	// blocks in grid
	dim3  grid(NX / nThreads, NY, 1);
	// threads in block
	dim3  threads(nThreads, 1, 1);
	// fix: the iteration cap is a loop counter, not a double
	const unsigned int MAX_ITERATIONS = 1000000;
	const double TOLERANCE = 1.0e-9;  // convergence target for max residual
	double *Res = (double*)malloc(mem_size_scalar);  // host copy of residual field
	double error = 0.0;
	double *R;  // device residual field, one value per grid node
	checkCudaErrors(hipMalloc((void**)&R, mem_size_scalar));
	unsigned int it = 0;
	for (it = 0; it < MAX_ITERATIONS; ++it) {
		error = 0.0;
		gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
		// OPTMARK: full-field device-to-host copy plus host-side max scan every
		// sweep dominates the cost; a device reduction would be much cheaper.
		checkCudaErrors(hipMemcpy(Res, R, mem_size_scalar, hipMemcpyDeviceToHost));
		for (unsigned int y = 0; y < NY; ++y) {
			for (unsigned int x = 0; x < NX; ++x) {
				//if (it % 1000 == 1) printf("%g\n", error);
				if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
			}
		}
		if (error < TOLERANCE) break;
	}
	checkCudaErrors(hipFree(R));
	free(Res);
	//printf("%g\n", error);
	if (it == MAX_ITERATIONS) {
		printf("Poisson solver did not converge!\n");
		printf("Residual = %g\n", error);
		system("pause");
		//exit(-1);
	}
	getLastCudaError("Poisson solver kernel error");
}
// One in-place relaxation sweep of the Poisson equation on the (NX x NY)
// grid: Dirichlet walls (phi = voltage at y = 0, phi = 0 at y = NY-1),
// periodic in x. R receives |phi_new - phi_old| per cell so the host can
// test convergence. Assumes launch shape blockDim = (nThreads, 1, 1),
// gridDim = (NX/nThreads, NY, 1).
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// Position inside the shared tile, offset by the halo width.
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
// Periodic neighbour indices; with blockDim.y == 1, yp1 is (y+1) mod NY.
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
// Tile of 3 rows (y-1, y, y+1), each nThreads + 2*RAD columns wide.
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
// The first RAD threads of the block fetch the left/right halo columns.
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
// With blockDim.y == 1 every thread loads the rows above and below itself.
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
// Dirichlet electrodes. Returning before __syncthreads() is safe only
// because blockDim.y == 1: a whole block shares one y, so the branch is
// uniform across the block.
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
// 5-point stencil read from the shared tile.
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
// Relaxation update. NOTE(review): the 0.25 weighting assumes dx == dy —
// confirm against LBM.h.
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
// In-place write: other blocks may see old or new values during the same
// sweep (chaotic relaxation); convergence is checked globally by the host.
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
// Host launcher: compute E = -grad(phi) on the device, then patch the
// wall-normal field component on the two y boundaries.
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
    dim3 numBlocks(NX / nThreads, NY, 1);   // one block row per y line
    dim3 threadsPerBlock(nThreads, 1, 1);
    gpu_efield << < numBlocks, threadsPerBlock >> > (phi_gpu, Ex_gpu, Ey_gpu);
    gpu_bc << < numBlocks, threadsPerBlock >> > (Ey_gpu);
    getLastCudaError("Efield kernel error");
}
// Central-difference electric field from the potential: E = -grad(phi),
// with periodic wrap in both directions. The y walls are overwritten
// afterwards by gpu_bc.
__global__ void gpu_efield(double *fi, double *ex, double *ey){
    const unsigned int y = blockIdx.y;
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int xR = (x + 1) % NX;
    const unsigned int yU = (y + 1) % NY;
    const unsigned int xL = (NX + x - 1) % NX;
    const unsigned int yD = (NY + y - 1) % NY;
    ex[gpu_scalar_index(x, y)] = 0.5 * (fi[gpu_scalar_index(xL, y)] - fi[gpu_scalar_index(xR, y)]) / dx;
    ey[gpu_scalar_index(x, y)] = 0.5 * (fi[gpu_scalar_index(x, yD)] - fi[gpu_scalar_index(x, yU)]) / dy;
}
// Overwrite Ey on the two y walls with the value one cell inside
// (one-sided closure of the central difference); interior rows untouched.
__global__ void gpu_bc(double *ey) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y;
    if (y == 0) {
        //ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)];
        ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
    } else if (y == NY - 1) {
        //ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)];
        ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
    }
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
// Host launcher: build the odd extension of the RHS over the extended
// (NX x NE) grid so the Dirichlet problem can be solved with an FFT.
__host__ void extension(double *c, hipfftDoubleComplex *c_ext) {
    dim3 numBlocks(NX / nThreads, NE, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    odd_extension << < numBlocks, threadsPerBlock >> > (c, c_ext);
    getLastCudaError("Odd Extension error");
}
// Build the odd extension (in y) of the Poisson right-hand side on the
// extended grid: rows 0..NY-1 carry -charge/eps with the Dirichlet wall
// potentials folded into the first/last interior rows; rows NY..NE-1 mirror
// the interior with opposite sign. NOTE(review): the mirror index NE - y
// presumably relies on NE == 2*(NY-1) — confirm against LBM.h.
__global__ void odd_extension(double *charge, hipfftDoubleComplex *charge_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// Boundary row: odd extension vanishes here.
if (y == 0) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// First interior row: fold the driven-electrode potential into the RHS.
if (y == 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Plain interior rows of the physical domain.
if (y > 1 && y < NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Other boundary row: extension vanishes again.
if (y == NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Mirrored half: interior values with flipped sign.
if (y > NY - 1 && y<NE-1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Mirror of row 1, including the folded wall potential with flipped sign.
if (y == NE - 1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
}
// Host launcher: divide every Fourier mode of `source` by the discrete
// operator eigenvalue built from the wavenumber tables kx/ky.
__host__ void derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
    dim3 numBlocks(NX / nThreads, NE, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    gpu_derivative << < numBlocks, threadsPerBlock >> > (kx, ky, source);
    getLastCudaError("Gpu derivative error");
}
// Spectral solve step: scale each Fourier mode by -1/mu, where mu combines
// an exact second-difference eigenvalue in y with kx^2 in x. The (0,0)
// mode divides by 1 instead of 0; NOTE(review): presumably the zero mode
// carries no information after the odd extension — confirm.
__global__ void gpu_derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
double I = kx[x];
double J = ky[y];
// Eigenvalue of the mixed (finite-difference in y, spectral in x) operator.
double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
if (y == 0 && x == 0) mu = 1.0;
source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
// Host launcher: copy the solved potential from the extended complex field
// back onto the physical (NX x NY) domain.
__host__ void extract(double *fi, hipfftDoubleComplex *fi_ext) {
    dim3 numBlocks(NX / nThreads, NY, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    odd_extract << < numBlocks, threadsPerBlock >> > (fi, fi_ext);
    getLastCudaError("Extraction error");
}
// Recover phi on the physical domain: re-pin the Dirichlet walls and take
// the real part of the inverse transform, scaled by 1/size (FFT
// normalization), on the interior.
__global__ void odd_extract(double *phi, hipfftDoubleComplex *phi_ext) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y;
    if (y == 0) {
        phi[gpu_scalar_index(x, y)] = voltage;   // driven electrode
    } else if (y == NY - 1) {
        phi[gpu_scalar_index(x, y)] = 0.0;       // grounded electrode
    } else {
        phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x / size;
    }
}
| 1912b6c388ce49152d0265aa3d6ed42c77614e78.cu | /* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cufft.h>
#include "LBM.h"
#include <device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, cufftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, cufftDoubleComplex*);
__global__ void odd_extract(double*, cufftDoubleComplex*);
__global__ void gpu_bc(double*);
// Flattened index into the block-shared stencil tile: the row pitch is
// nThreads interior columns plus a RAD-wide halo on each side.
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
return (2*RAD + nThreads)*y + x;
}
// Relaxation driver for the Poisson equation: repeatedly launches
// gpu_poisson and pulls the per-cell residual field back to the host until
// the max-norm residual drops below TOLERANCE or MAX_ITERATIONS is reached.
// phi_gpu is updated in place on the device; charge_gpu is read-only.
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
// blocks in grid: one block row per y line, nThreads columns per block
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
unsigned int it = 0;
// Fix: MAX_ITERATIONS was a double used as a loop bound and equality
// comparand against an unsigned counter; use an integral constant.
const unsigned int MAX_ITERATIONS = 1000000u;
const double TOLERANCE = 1.0e-9;
// Host-side copy of the residual field; fail loudly if allocation fails
// (the original dereferenced the pointer unchecked).
double *Res = (double*)malloc(mem_size_scalar);
if (Res == NULL) {
printf("poisson_phi: host residual buffer allocation failed\n");
exit(EXIT_FAILURE);
}
double error = 0.0;
double *R; // device residual buffer, one double per grid cell
checkCudaErrors(cudaMalloc((void**)&R, mem_size_scalar));
for (it = 0; it < MAX_ITERATIONS; ++it) {
error = 0.0;
gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
// Synchronous copy doubles as the kernel-completion barrier.
checkCudaErrors(cudaMemcpy(Res, R, mem_size_scalar, cudaMemcpyDeviceToHost));
// Max-norm of the residual over the whole domain.
for (unsigned int y = 0; y < NY; ++y) {
for (unsigned int x = 0; x < NX; ++x) {
if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
}
}
if (error < TOLERANCE) break;
}
checkCudaErrors(cudaFree(R));
free(Res);
// it == MAX_ITERATIONS only when the loop ran to exhaustion (no break).
if (it == MAX_ITERATIONS) {
printf("Poisson solver did not converge!\n");
printf("Residual = %g\n", error);
system("pause"); // NOTE(review): Windows-only pause, kept for parity
//exit(-1);
}
getLastCudaError("Poisson solver kernel error");
}
// One in-place relaxation sweep of the Poisson equation on the (NX x NY)
// grid: Dirichlet walls (phi = voltage at y = 0, phi = 0 at y = NY-1),
// periodic in x. R receives |phi_new - phi_old| per cell so the host can
// test convergence. Assumes launch shape blockDim = (nThreads, 1, 1),
// gridDim = (NX/nThreads, NY, 1).
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// Position inside the shared tile, offset by the halo width.
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
// Periodic neighbour indices; with blockDim.y == 1, yp1 is (y+1) mod NY.
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
// Tile of 3 rows (y-1, y, y+1), each nThreads + 2*RAD columns wide.
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
// The first RAD threads of the block fetch the left/right halo columns.
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
// With blockDim.y == 1 every thread loads the rows above and below itself.
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
// Dirichlet electrodes. Returning before __syncthreads() is safe only
// because blockDim.y == 1: a whole block shares one y, so the branch is
// uniform across the block.
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
// 5-point stencil read from the shared tile.
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
// Relaxation update. NOTE(review): the 0.25 weighting assumes dx == dy —
// confirm against LBM.h.
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
// In-place write: other blocks may see old or new values during the same
// sweep (chaotic relaxation); convergence is checked globally by the host.
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
// Host launcher: compute E = -grad(phi) on the device, then patch the
// wall-normal field component on the two y boundaries.
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
    dim3 numBlocks(NX / nThreads, NY, 1);   // one block row per y line
    dim3 threadsPerBlock(nThreads, 1, 1);
    gpu_efield << < numBlocks, threadsPerBlock >> > (phi_gpu, Ex_gpu, Ey_gpu);
    gpu_bc << < numBlocks, threadsPerBlock >> > (Ey_gpu);
    getLastCudaError("Efield kernel error");
}
// Central-difference electric field from the potential: E = -grad(phi),
// with periodic wrap in both directions. The y walls are overwritten
// afterwards by gpu_bc.
__global__ void gpu_efield(double *fi, double *ex, double *ey){
    const unsigned int y = blockIdx.y;
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int xR = (x + 1) % NX;
    const unsigned int yU = (y + 1) % NY;
    const unsigned int xL = (NX + x - 1) % NX;
    const unsigned int yD = (NY + y - 1) % NY;
    ex[gpu_scalar_index(x, y)] = 0.5 * (fi[gpu_scalar_index(xL, y)] - fi[gpu_scalar_index(xR, y)]) / dx;
    ey[gpu_scalar_index(x, y)] = 0.5 * (fi[gpu_scalar_index(x, yD)] - fi[gpu_scalar_index(x, yU)]) / dy;
}
// Overwrite Ey on the two y walls with the value one cell inside
// (one-sided closure of the central difference); interior rows untouched.
__global__ void gpu_bc(double *ey) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y;
    if (y == 0) {
        //ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)];
        ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
    } else if (y == NY - 1) {
        //ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)];
        ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
    }
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
// Host launcher: build the odd extension of the RHS over the extended
// (NX x NE) grid so the Dirichlet problem can be solved with an FFT.
__host__ void extension(double *c, cufftDoubleComplex *c_ext) {
    dim3 numBlocks(NX / nThreads, NE, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    odd_extension << < numBlocks, threadsPerBlock >> > (c, c_ext);
    getLastCudaError("Odd Extension error");
}
// Build the odd extension (in y) of the Poisson right-hand side on the
// extended grid: rows 0..NY-1 carry -charge/eps with the Dirichlet wall
// potentials folded into the first/last interior rows; rows NY..NE-1 mirror
// the interior with opposite sign. NOTE(review): the mirror index NE - y
// presumably relies on NE == 2*(NY-1) — confirm against LBM.h.
__global__ void odd_extension(double *charge, cufftDoubleComplex *charge_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// Boundary row: odd extension vanishes here.
if (y == 0) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// First interior row: fold the driven-electrode potential into the RHS.
if (y == 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Plain interior rows of the physical domain.
if (y > 1 && y < NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Other boundary row: extension vanishes again.
if (y == NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Mirrored half: interior values with flipped sign.
if (y > NY - 1 && y<NE-1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
// Mirror of row 1, including the folded wall potential with flipped sign.
if (y == NE - 1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
}
// Host launcher: divide every Fourier mode of `source` by the discrete
// operator eigenvalue built from the wavenumber tables kx/ky.
__host__ void derivative(double *kx, double *ky, cufftDoubleComplex *source) {
    dim3 numBlocks(NX / nThreads, NE, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    gpu_derivative << < numBlocks, threadsPerBlock >> > (kx, ky, source);
    getLastCudaError("Gpu derivative error");
}
// Spectral solve step: scale each Fourier mode by -1/mu, where mu combines
// an exact second-difference eigenvalue in y with kx^2 in x. The (0,0)
// mode divides by 1 instead of 0; NOTE(review): presumably the zero mode
// carries no information after the odd extension — confirm.
__global__ void gpu_derivative(double *kx, double *ky, cufftDoubleComplex *source) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
double I = kx[x];
double J = ky[y];
// Eigenvalue of the mixed (finite-difference in y, spectral in x) operator.
double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
if (y == 0 && x == 0) mu = 1.0;
source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
// Host launcher: copy the solved potential from the extended complex field
// back onto the physical (NX x NY) domain.
__host__ void extract(double *fi, cufftDoubleComplex *fi_ext) {
    dim3 numBlocks(NX / nThreads, NY, 1);
    dim3 threadsPerBlock(nThreads, 1, 1);
    odd_extract << < numBlocks, threadsPerBlock >> > (fi, fi_ext);
    getLastCudaError("Extraction error");
}
// Recover phi on the physical domain: re-pin the Dirichlet walls and take
// the real part of the inverse transform, scaled by 1/size (FFT
// normalization), on the interior.
__global__ void odd_extract(double *phi, cufftDoubleComplex *phi_ext) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y;
    if (y == 0) {
        phi[gpu_scalar_index(x, y)] = voltage;   // driven electrode
    } else if (y == NY - 1) {
        phi[gpu_scalar_index(x, y)] = 0.0;       // grounded electrode
    } else {
        phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x / size;
    }
}
|
f450209ad0b1d83cee842e8a6448c2168323aac4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addback_spikes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch geometries to sweep: four square blocks, then 1-D blocks of 64..1024 threads.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square problem sizes to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipify) launch-latency micro-benchmark for addback_spikes.
// NOTE(review): argv[1] is read without checking argc — running with no
// argument is undefined behaviour.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): every buffer below is allocated as XSIZE*YSIZE *bytes*,
// not elements (missing sizeof), and none is ever freed inside the sweep —
// presumably tolerated because only launch time is measured; confirm.
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const int *st = NULL;
hipMalloc(&st, XSIZE*YSIZE);
const int *id = NULL;
hipMalloc(&id, XSIZE*YSIZE);
const float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const int *count = NULL;
hipMalloc(&count, XSIZE*YSIZE);
float *dataraw = NULL;
hipMalloc(&dataraw, XSIZE*YSIZE);
const float *W = NULL;
hipMalloc(&W, XSIZE*YSIZE);
const float *U = NULL;
hipMalloc(&U, XSIZE*YSIZE);
const int iter = 1;
const float *spkscore = NULL;
hipMalloc(&spkscore, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch forces context creation; sync excludes it from timing.
hipFree(0);hipLaunchKernelGGL((
addback_spikes), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,st,id,x,count,dataraw,W,U,iter,spkscore);
hipDeviceSynchronize();
// Warm-up launches, not timed.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
addback_spikes), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,st,id,x,count,dataraw,W,U,iter,spkscore);
}
// Timed section: 1000 asynchronous launches. NOTE(review): no sync before
// reading the clock, so this measures enqueue overhead, not kernel time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
addback_spikes), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,st,id,x,count,dataraw,W,U,iter,spkscore);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f450209ad0b1d83cee842e8a6448c2168323aac4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addback_spikes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Launch-latency micro-benchmark for addback_spikes: for each problem size
// in matrices_ (argv[1] selects how many) and each launch shape in blocks_,
// time 1000 asynchronous kernel launches and print the elapsed microseconds.
// NOTE: no sync before reading the clock — this measures enqueue overhead.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Fix: the generated harness dereferenced argv[1] unconditionally and
// crashed when run without the matrix-count argument.
if (argc < 2) {
fprintf(stderr, "Usage: %s <matrix_len (1..7)>\n", argv[0]);
return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
size_t n = (size_t)XSIZE * YSIZE; // element count for every buffer
// Fix: the generated code allocated XSIZE*YSIZE *bytes* for double/
// float/int buffers (missing sizeof), so kernels could index past the end.
const double *Params = NULL;
cudaMalloc(&Params, n * sizeof(double));
const int *st = NULL;
cudaMalloc(&st, n * sizeof(int));
const int *id = NULL;
cudaMalloc(&id, n * sizeof(int));
const float *x = NULL;
cudaMalloc(&x, n * sizeof(float));
const int *count = NULL;
cudaMalloc(&count, n * sizeof(int));
float *dataraw = NULL;
cudaMalloc(&dataraw, n * sizeof(float));
const float *W = NULL;
cudaMalloc(&W, n * sizeof(float));
const float *U = NULL;
cudaMalloc(&U, n * sizeof(float));
const int iter = 1;
const float *spkscore = NULL;
cudaMalloc(&spkscore, n * sizeof(float));
// Round the problem size up to a multiple of the block shape.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) iXSIZE++;
while (iYSIZE % BLOCKY != 0) iYSIZE++;
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // force context creation before timing
addback_spikes<<<gridBlock,threadBlock>>>(Params,st,id,x,count,dataraw,W,U,iter,spkscore);
cudaDeviceSynchronize();
// Warm-up launches, not timed.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addback_spikes<<<gridBlock,threadBlock>>>(Params,st,id,x,count,dataraw,W,U,iter,spkscore);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addback_spikes<<<gridBlock,threadBlock>>>(Params,st,id,x,count,dataraw,W,U,iter,spkscore);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Fix: the original leaked every buffer across all sweep iterations;
// release them before the next configuration.
cudaFree((void*)Params); cudaFree((void*)st); cudaFree((void*)id);
cudaFree((void*)x); cudaFree((void*)count); cudaFree(dataraw);
cudaFree((void*)W); cudaFree((void*)U); cudaFree((void*)spkscore);
}
}
return 0;
}
c11c79c8e0eb427fa7333d3d7e81aa9ef25941ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "clone_engine_kernels_seed.h"
#include "clone_engine_cuda_helpers.h"
#include "BestCands.h"
#include "computeChi2_kernels.h"
#include "index_selection_kernels.h"
#include "reorganize_gplex.h"
#include "build_tracks_kernels.h"
#include "array_algorithms_cu.h"
#include "propagation_kernels.h"
#include <cstdio>
constexpr int BLOCK_SIZE_X = 256;
// Per-thread guard around findTrackCands_fn: evaluates up to maxSize
// candidate hits on layer `ilay` for matriplex slot `itrack` / seed
// candidate `icand`, pushing survivors into the block-shared cand_list.
// Slots beyond the active width N do nothing.
__device__ void findCandidates_fn_seed(const int ilay,
Hit *hits, const GPlexQI &XHitSize, const GPlexHitIdx &XHitArr,
const GPlexLS &propErr, GPlexHS &msErr, GPlexHV &msPar,
const GPlexLV &propPar, GPlexQF &outChi2,
GPlexQF &Chi2, GPlexQI *HitsIdx_arr,
GPlexQB &Valid,
const int maxSize, const int itrack, const int icand, const int N,
CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X>& cand_list)
{
if (itrack >= N) return;
findTrackCands_fn(ilay, hits, XHitSize, XHitArr,
propErr, msErr, msPar, propPar, outChi2, Chi2, HitsIdx_arr,
maxSize, itrack, icand, N, cand_list);
}
// Materialize the new candidate set of seed `iseed_ev` from the sorted
// best-candidate list held in this thread's block slot: each entry clones
// its parent track, appends the chosen hit index and stores the chi2.
__device__ void updateCandsWithInfo_seed(Track *candidates, int *ntracks_per_seed,
int iseed_ev, CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X>& cand_list)
{
int iseed_block = threadIdx.x;
Track new_cands[Config::maxCandsPerSeed]; // too big too be __shared__
// First pass: build the replacement tracks from the surviving entries.
for (int icand = 0; icand < ntracks_per_seed[iseed_ev]; ++icand) {
Track &tmp_track = new_cands[icand];
int my_trkIdx, my_hitIdx, my_nhits;
float my_chi2;
cand_list.get_cand_info(iseed_block, icand, my_trkIdx,
my_hitIdx, my_nhits, my_chi2);
// NOTE(review): if trkIdx is ever out of range, new_cands[icand] is never
// assigned yet still copied back below — presumably count_valid_cands
// already excludes such entries; confirm.
if (0 <= my_trkIdx && my_trkIdx < ntracks_per_seed[iseed_ev]) {
Track& base_track = candidates[iseed_ev*Config::maxCandsPerSeed + my_trkIdx];
tmp_track = base_track;
tmp_track.addHitIdx(my_hitIdx, 0.f /*chi2*/);
tmp_track.setChi2(my_chi2);
}
} // icand
// Second loop required: several new candidates can come from the same old one.
for (int icand = 0; icand < ntracks_per_seed[iseed_ev]; ++icand) {
candidates[iseed_ev*Config::maxCandsPerSeed + icand] = new_cands[icand];
}
}
// Combinatorial track building, one thread per seed (clone-engine style).
// For each eta bin and each layer past the seeding layers, every thread
// scores hit candidates for all current candidates of its seed, keeps the
// best Config::maxCandsPerSeed in a block-shared sorted list, then rewrites
// the seed's candidate set from that list.
__global__ void findInLayers_kernel_seed (LayerOfHitsCU *layers,
EtaBinOfCombCandidatesCU* etabins_of_comb_candidates,
GPlexQI XHitSize, GPlexHitIdx XHitArr,
GPlexLS Err_iP, GPlexLV Par_iP,
GPlexHS *msErr_arr, GPlexHV *msPar_arr,
GPlexLS Err_iC, GPlexLV Par_iC,
GPlexQF outChi2,
GPlexQF Chi2, GPlexQI *HitsIdx_arr,
GPlexQI inChg, GPlexQI Label,
GPlexQI SeedIdx, GPlexQI CandIdx,
GPlexQB Valid, GeometryCU geom,
int *maxSize, int gplex_size,
const int nlayers_per_seed)
{
int iseed_plex = threadIdx.x + blockIdx.x * blockDim.x;
for (int ebin = 0; ebin < Config::nEtaBin; ebin++) {
EtaBinOfCombCandidatesCU& etabin_of_cands = etabins_of_comb_candidates[ebin];
int nseed = etabin_of_cands.m_nseed;
if (nseed == 0) continue;
//int max_nseed_plex = gplex_size; // no more /Config::maxCandsPerSeed; -> one thread
int N = gplex_size;
int maxSize_block;
// we need both iseed_plex and iseed_ev:
// * iseed_plex to access the matriplex structures, used for the algorithm
// * iseed_ev to access the other structures (tracks, mostly), used to
// interact with the outside world.
for (int iseed_ev = iseed_plex;
iseed_ev < nseed;
iseed_ev += gridDim.x * blockDim.x) {
int iseed_block = threadIdx.x;
// One best-candidate list slot per thread of this block.
__shared__ CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X> cand_list;
Track *tracks = etabin_of_cands.m_candidates.data();
int *tracks_per_seed = etabin_of_cands.m_ntracks_per_seed.data();
for (int ilay = nlayers_per_seed; ilay <= Config::nLayers; ++ilay) {
cand_list.reset(iseed_block);
int Nhits = ilay;
for (int icand_seed = 0; icand_seed < tracks_per_seed[iseed_ev]; ++icand_seed) {
// Load this candidate's state into the matriplex slot.
InputTracksAndHitIdxComb_fn_seed(tracks, tracks_per_seed,
Err_iP, Par_iP,
inChg, Chi2, Label, HitsIdx_arr,
SeedIdx, CandIdx, Valid, Nhits ,
iseed_ev, icand_seed, iseed_plex, N);
// no cands at this position for this layer
// Past the first building layer: fold in the hit picked previously,
// then propagate to the current layer radius.
if (ilay > nlayers_per_seed) {
UpdateWithLastHit_fn(layers[ilay-1], HitsIdx_arr[ilay-1],
msPar_arr[ilay-1], msErr_arr[ilay-1],
Par_iP, Err_iP, Par_iC, Err_iC, iseed_plex, N);
if (ilay < Config::nLayers) {
propagationForBuilding_fn(Err_iC, Par_iC, inChg, geom.radii[ilay],
Err_iP, Par_iP, iseed_plex, N);
OutputParErrCU_fn_seed(tracks, Err_iP, Par_iP,
iseed_ev, icand_seed, N);
} else {
OutputParErrCU_fn_seed(tracks, Err_iC, Par_iC,
iseed_ev, icand_seed, N);
// NOTE(review): this return exits the kernel for the whole thread,
// skipping remaining seeds of the grid-stride loop and later eta
// bins — confirm this is intended.
return;
}
}
XHitSize[iseed_plex] = 0;
selectHitIndices_fn(layers[ilay], Err_iP, Par_iP,
XHitSize, XHitArr, iseed_plex, N);
// Block-wide max of XHitSize -> uniform candidate-scoring loop bound.
reduceMaxPartial_fn<int, BLOCK_SIZE_X, 1, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS>
(XHitSize.ptr, blockDim.x * blockIdx.x, blockDim.x, &maxSize_block);
findCandidates_fn_seed(ilay,
layers[ilay].m_hits.data(), XHitSize, XHitArr,
Err_iP, msErr_arr[ilay], msPar_arr[ilay], Par_iP,
outChi2, Chi2, HitsIdx_arr, Valid,
maxSize_block, iseed_plex, icand_seed, N, cand_list);
} // icand_seed
// Keep only the best candidates and rewrite the seed's track set.
cand_list.heap_sort(iseed_block, Config::maxCandsPerSeed);
tracks_per_seed[iseed_ev] = cand_list.count_valid_cands(iseed_block);
updateCandsWithInfo_seed(tracks, tracks_per_seed, iseed_ev, cand_list);
} // ilay
} // iseed
} // eta
}
// Host launcher for findInLayers_kernel_seed: one thread per seed,
// BLOCK_SIZE_X threads per block, enqueued on `stream`. N is the matriplex
// width (number of seed slots per grid-stride step).
void findInLayers_wrapper_seed(hipStream_t &stream,
LayerOfHitsCU *layers,
EventOfCombCandidatesCU &event_of_cands_cu,
GPlexQI &XHitSize, GPlexHitIdx &XHitArr,
GPlexLS &Err_iP, GPlexLV &Par_iP,
GPlexHS *msErr, GPlexHV *msPar,
GPlexLS &Err_iC, GPlexLV &Par_iC,
GPlexQF &outChi2,
GPlexQF &Chi2, GPlexQI *HitsIdx_arr,
GPlexQI &inChg, GPlexQI &Label,
GPlexQI &SeedIdx, GPlexQI &CandIdx,
GPlexQB& Valid,
GeometryCU &geom,
int *maxSize, int N)
{
// Ceil-divide so every seed slot gets a thread.
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
hipLaunchKernelGGL(( findInLayers_kernel_seed) , dim3(grid), dim3(block), 0, stream,
layers,
event_of_cands_cu.m_etabins_of_comb_candidates.data(),
XHitSize, XHitArr, Err_iP, Par_iP, msErr, msPar,
Err_iC, Par_iC, outChi2, Chi2, HitsIdx_arr, inChg, Label,
SeedIdx, CandIdx, Valid, geom, maxSize, N, Config::nlayers_per_seed);
/*cudaCheckErrorSync();*/
}
| c11c79c8e0eb427fa7333d3d7e81aa9ef25941ad.cu | #include "clone_engine_kernels_seed.h"
#include "clone_engine_cuda_helpers.h"
#include "BestCands.h"
#include "computeChi2_kernels.h"
#include "index_selection_kernels.h"
#include "reorganize_gplex.h"
#include "build_tracks_kernels.h"
#include "array_algorithms_cu.h"
#include "propagation_kernels.h"
#include <cstdio>
constexpr int BLOCK_SIZE_X = 256;
// Per-thread guard around findTrackCands_fn: evaluates up to maxSize
// candidate hits on layer `ilay` for matriplex slot `itrack` / seed
// candidate `icand`, pushing survivors into the block-shared cand_list.
// Slots beyond the active width N do nothing.
__device__ void findCandidates_fn_seed(const int ilay,
Hit *hits, const GPlexQI &XHitSize, const GPlexHitIdx &XHitArr,
const GPlexLS &propErr, GPlexHS &msErr, GPlexHV &msPar,
const GPlexLV &propPar, GPlexQF &outChi2,
GPlexQF &Chi2, GPlexQI *HitsIdx_arr,
GPlexQB &Valid,
const int maxSize, const int itrack, const int icand, const int N,
CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X>& cand_list)
{
if (itrack >= N) return;
findTrackCands_fn(ilay, hits, XHitSize, XHitArr,
propErr, msErr, msPar, propPar, outChi2, Chi2, HitsIdx_arr,
maxSize, itrack, icand, N, cand_list);
}
// Materialize the new candidate set of seed `iseed_ev` from the sorted
// best-candidate list held in this thread's block slot: each entry clones
// its parent track, appends the chosen hit index and stores the chi2.
__device__ void updateCandsWithInfo_seed(Track *candidates, int *ntracks_per_seed,
int iseed_ev, CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X>& cand_list)
{
int iseed_block = threadIdx.x;
Track new_cands[Config::maxCandsPerSeed]; // too big too be __shared__
// First pass: build the replacement tracks from the surviving entries.
for (int icand = 0; icand < ntracks_per_seed[iseed_ev]; ++icand) {
Track &tmp_track = new_cands[icand];
int my_trkIdx, my_hitIdx, my_nhits;
float my_chi2;
cand_list.get_cand_info(iseed_block, icand, my_trkIdx,
my_hitIdx, my_nhits, my_chi2);
// NOTE(review): if trkIdx is ever out of range, new_cands[icand] is never
// assigned yet still copied back below — presumably count_valid_cands
// already excludes such entries; confirm.
if (0 <= my_trkIdx && my_trkIdx < ntracks_per_seed[iseed_ev]) {
Track& base_track = candidates[iseed_ev*Config::maxCandsPerSeed + my_trkIdx];
tmp_track = base_track;
tmp_track.addHitIdx(my_hitIdx, 0.f /*chi2*/);
tmp_track.setChi2(my_chi2);
}
} // icand
// Second loop required: several new candidates can come from the same old one.
for (int icand = 0; icand < ntracks_per_seed[iseed_ev]; ++icand) {
candidates[iseed_ev*Config::maxCandsPerSeed + icand] = new_cands[icand];
}
}
// Combinatorial track building, one thread per seed (clone-engine style).
// For each eta bin and each layer past the seeding layers, every thread
// scores hit candidates for all current candidates of its seed, keeps the
// best Config::maxCandsPerSeed in a block-shared sorted list, then rewrites
// the seed's candidate set from that list.
__global__ void findInLayers_kernel_seed (LayerOfHitsCU *layers,
EtaBinOfCombCandidatesCU* etabins_of_comb_candidates,
GPlexQI XHitSize, GPlexHitIdx XHitArr,
GPlexLS Err_iP, GPlexLV Par_iP,
GPlexHS *msErr_arr, GPlexHV *msPar_arr,
GPlexLS Err_iC, GPlexLV Par_iC,
GPlexQF outChi2,
GPlexQF Chi2, GPlexQI *HitsIdx_arr,
GPlexQI inChg, GPlexQI Label,
GPlexQI SeedIdx, GPlexQI CandIdx,
GPlexQB Valid, GeometryCU geom,
int *maxSize, int gplex_size,
const int nlayers_per_seed)
{
int iseed_plex = threadIdx.x + blockIdx.x * blockDim.x;
for (int ebin = 0; ebin < Config::nEtaBin; ebin++) {
EtaBinOfCombCandidatesCU& etabin_of_cands = etabins_of_comb_candidates[ebin];
int nseed = etabin_of_cands.m_nseed;
if (nseed == 0) continue;
//int max_nseed_plex = gplex_size; // no more /Config::maxCandsPerSeed; -> one thread
int N = gplex_size;
int maxSize_block;
// we need both iseed_plex and iseed_ev:
// * iseed_plex to access the matriplex structures, used for the algorithm
// * iseed_ev to access the other structures (tracks, mostly), used to
// interact with the outside world.
for (int iseed_ev = iseed_plex;
iseed_ev < nseed;
iseed_ev += gridDim.x * blockDim.x) {
int iseed_block = threadIdx.x;
// One best-candidate list slot per thread of this block.
__shared__ CandsGPU::BestCands<Config::maxCandsPerSeed, BLOCK_SIZE_X> cand_list;
Track *tracks = etabin_of_cands.m_candidates.data();
int *tracks_per_seed = etabin_of_cands.m_ntracks_per_seed.data();
for (int ilay = nlayers_per_seed; ilay <= Config::nLayers; ++ilay) {
cand_list.reset(iseed_block);
int Nhits = ilay;
for (int icand_seed = 0; icand_seed < tracks_per_seed[iseed_ev]; ++icand_seed) {
// Load this candidate's state into the matriplex slot.
InputTracksAndHitIdxComb_fn_seed(tracks, tracks_per_seed,
Err_iP, Par_iP,
inChg, Chi2, Label, HitsIdx_arr,
SeedIdx, CandIdx, Valid, Nhits ,
iseed_ev, icand_seed, iseed_plex, N);
// no cands at this position for this layer
// Past the first building layer: fold in the hit picked previously,
// then propagate to the current layer radius.
if (ilay > nlayers_per_seed) {
UpdateWithLastHit_fn(layers[ilay-1], HitsIdx_arr[ilay-1],
msPar_arr[ilay-1], msErr_arr[ilay-1],
Par_iP, Err_iP, Par_iC, Err_iC, iseed_plex, N);
if (ilay < Config::nLayers) {
propagationForBuilding_fn(Err_iC, Par_iC, inChg, geom.radii[ilay],
Err_iP, Par_iP, iseed_plex, N);
OutputParErrCU_fn_seed(tracks, Err_iP, Par_iP,
iseed_ev, icand_seed, N);
} else {
OutputParErrCU_fn_seed(tracks, Err_iC, Par_iC,
iseed_ev, icand_seed, N);
// NOTE(review): this return exits the kernel for the whole thread,
// skipping remaining seeds of the grid-stride loop and later eta
// bins — confirm this is intended.
return;
}
}
XHitSize[iseed_plex] = 0;
selectHitIndices_fn(layers[ilay], Err_iP, Par_iP,
XHitSize, XHitArr, iseed_plex, N);
// Block-wide max of XHitSize -> uniform candidate-scoring loop bound.
reduceMaxPartial_fn<int, BLOCK_SIZE_X, 1, cub::BLOCK_REDUCE_WARP_REDUCTIONS>
(XHitSize.ptr, blockDim.x * blockIdx.x, blockDim.x, &maxSize_block);
findCandidates_fn_seed(ilay,
layers[ilay].m_hits.data(), XHitSize, XHitArr,
Err_iP, msErr_arr[ilay], msPar_arr[ilay], Par_iP,
outChi2, Chi2, HitsIdx_arr, Valid,
maxSize_block, iseed_plex, icand_seed, N, cand_list);
} // icand_seed
// Keep only the best candidates and rewrite the seed's track set.
cand_list.heap_sort(iseed_block, Config::maxCandsPerSeed);
tracks_per_seed[iseed_ev] = cand_list.count_valid_cands(iseed_block);
updateCandsWithInfo_seed(tracks, tracks_per_seed, iseed_ev, cand_list);
} // ilay
} // iseed
} // eta
}
// Host launcher for findInLayers_kernel_seed: one thread per seed,
// BLOCK_SIZE_X threads per block, enqueued on `stream`. N is the matriplex
// width (number of seed slots per grid-stride step).
void findInLayers_wrapper_seed(cudaStream_t &stream,
LayerOfHitsCU *layers,
EventOfCombCandidatesCU &event_of_cands_cu,
GPlexQI &XHitSize, GPlexHitIdx &XHitArr,
GPlexLS &Err_iP, GPlexLV &Par_iP,
GPlexHS *msErr, GPlexHV *msPar,
GPlexLS &Err_iC, GPlexLV &Par_iC,
GPlexQF &outChi2,
GPlexQF &Chi2, GPlexQI *HitsIdx_arr,
GPlexQI &inChg, GPlexQI &Label,
GPlexQI &SeedIdx, GPlexQI &CandIdx,
GPlexQB& Valid,
GeometryCU &geom,
int *maxSize, int N)
{
// Ceil-divide so every seed slot gets a thread.
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
findInLayers_kernel_seed <<<grid, block, 0, stream>>>
(layers,
event_of_cands_cu.m_etabins_of_comb_candidates.data(),
XHitSize, XHitArr, Err_iP, Par_iP, msErr, msPar,
Err_iC, Par_iC, outChi2, Chi2, HitsIdx_arr, inChg, Label,
SeedIdx, CandIdx, Valid, geom, maxSize, N, Config::nlayers_per_seed);
/*cudaCheckErrorSync();*/
}
|
924cb3ef6eb5c66772fc578c302898f7d364576c.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// includes, project
#include <cutil.h>
#define NUM 28
// includes, kernels
#include "../benchmark_common.h"
#include "NN_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void computeGold(float*,
const float*,
const float*,
unsigned int,
unsigned int,
unsigned int);
void NeuralNetwork(hipStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag);
unsigned g_verbose;
// unsigned NUM;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// int
// main(int argc, char** argv)
// Benchmark entry point for the NN workload: runs one full forward pass of
// the five-layer network on `stream_app` and always reports success.
// `mutexapp` and `flag` are forwarded unchanged to NeuralNetwork(). The
// original stand-alone main()'s command-line parsing has been retired; the
// batch size NUM is fixed at compile time.
int main_NN(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
  NeuralNetwork(stream_app, mutexapp, flag);
  return 0;
}
// Intended to allocate every device-side buffer used by the network.
// NOTE(review): the pointer parameters are passed BY VALUE, so the addresses
// produced by hipMalloc are stored into local copies and lost on return —
// callers can never see these allocations (and the memory leaks). This is
// presumably why the call site in NeuralNetwork() is commented out and the
// allocations are done inline there instead. A real fix needs float**
// (or reference) parameters, i.e. an interface change.
void InitGPUMem(float* Layer1_Neurons_GPU,
float* Layer1_Weights_GPU,
float* Layer2_Neurons_GPU,
float* Layer2_Weights_GPU,
float* Layer3_Neurons_GPU,
float* Layer3_Weights_GPU,
float* Layer4_Neurons_GPU,
float* Layer4_Weights_GPU,
float* Layer5_Neurons_GPU) {
// Sizes: layer-1 input is 29x29 per image, NUM images per batch.
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer1_Neurons_GPU, sizeof(float) * 29 * 29 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer1_Weights_GPU, sizeof(float) * 156));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer2_Neurons_GPU,
sizeof(float) * 13 * 13 * 6 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer2_Weights_GPU, sizeof(float) * 7800));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer3_Neurons_GPU, sizeof(float) * 1250 * NUM));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer3_Weights_GPU, sizeof(float) * 125100));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer4_Neurons_GPU, sizeof(float) * 100 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer4_Weights_GPU, sizeof(float) * 1010));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer5_Neurons_GPU, sizeof(float) * 10 * NUM));
}
// Load the pre-trained weights of all four layers from ../NN/data/*.wei into
// the caller-provided host arrays (156, 7800, 125100 and 1010 floats
// respectively). Exits the process if any of the files cannot be opened.
// NOTE(review): every fread() return value is ignored, so a truncated file
// silently leaves part of an array uninitialized with no diagnostic.
void InitHostMem(float* Layer1_Weights_CPU,
float* Layer2_Weights_CPU,
float* Layer3_Weights_CPU,
float* Layer4_Weights_CPU) {
// initial layer 1 weight (read one float at a time; equivalent to a bulk read)
FILE* pFile1 = fopen("../NN/data/lw1.wei", "rb");
if (pFile1 != NULL) {
for (int i = 0; i < 156; ++i) {
fread(&(Layer1_Weights_CPU[i]), sizeof(float), 1, pFile1);
// printf("Layer1_Weights_CPU[%d]=%f\n", i, Layer1_Weights_CPU[i]);
}
fclose(pFile1);
}
// initial layer 2 weight
FILE* pFile2 = fopen("../NN/data/lw2.wei", "rb");
if (pFile2 != NULL) {
fread(Layer2_Weights_CPU, sizeof(float), 7800, pFile2);
fclose(pFile2);
}
// initial layer 3 weight
FILE* pFile3 = fopen("../NN/data/lw3.wei", "rb");
if (pFile3 != NULL) {
fread(Layer3_Weights_CPU, sizeof(float), 125100, pFile3);
fclose(pFile3);
}
// initial layer 4 weight
FILE* pFile4 = fopen("../NN/data/lw4.wei", "rb");
if (pFile4 != NULL) {
fread(Layer4_Weights_CPU, sizeof(float), 1010, pFile4);
fclose(pFile4);
}
// Abort if any file was missing (the network cannot run without weights).
if (!(pFile1 && pFile2 && pFile3 && pFile4)) {
printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
exit(1);
}
}
// Reverse the byte order of a 32-bit integer (big-endian <-> little-endian).
// The transformation is its own inverse. Used to decode the big-endian
// header fields of the MNIST idx3-ubyte file on little-endian hosts.
int swapEndianInt(int bEnum) {
  unsigned int u = (unsigned int)bEnum;
  u = ((u & 0x000000FFu) << 24) |
      ((u & 0x0000FF00u) << 8)  |
      ((u & 0x00FF0000u) >> 8)  |
      ((u & 0xFF000000u) >> 24);
  return (int)u;
}
// Read the first NUM images of the MNIST t10k-images-idx3-ubyte database into
// `layer1`. Each 28x28 source image is placed in the top-left of a 29x29
// frame (the extra row/column keeps the zeroed background value), then every
// pixel is mapped to a float via 1.0 - p/256 (so background -> 1.0, ink -> ~0).
// Exits the process if the database file is missing.
// NOTE(review): `foo` and `image` are never freed (leaks on every call), and
// the file is opened in text mode ("rt") even though it is binary data —
// harmless on POSIX, would corrupt reads on Windows.
void readIn(float* layer1) {
FILE* fp;
unsigned int* foo;
unsigned int i, j;
foo = (unsigned int*)calloc(sizeof(unsigned int), 1);
// unsigned char image[29*29*NUM];
unsigned char* image = (unsigned char*)malloc(29 * 29 * NUM * sizeof(char));
// Zero the whole buffer so the 29th row/column padding stays at 0.
for (i = 0; i < (29 * 29 * NUM); i++)
image[i] = 0;
fp = fopen("../NN/data/t10k-images-idx3-ubyte", "rt");
// fp=fopen("in.neu","rb");
if (fp) {
// idx3-ubyte header: magic, item count, rows, cols — all big-endian.
fread(foo, sizeof(int), 1, fp);
printf("magic number = %d\n", swapEndianInt(foo[0]));
fread(foo, sizeof(int), 1, fp);
printf("number of items = %d\n", swapEndianInt(foo[0]));
fread(foo, sizeof(int), 1, fp);
printf("number of rows = %d\n", swapEndianInt(foo[0]));
fread(foo, sizeof(int), 1, fp);
printf("number of rows = %d\n", swapEndianInt(foo[0]));
// Copy each 28-byte image row into the first 28 columns of a 29x29 frame.
for (j = 0; j < NUM; j++) {
for (i = 0; i < 28; i++)
fread((image + i * 29 + j * 29 * 29), sizeof(char), 28, fp);
}
// fread(layer1,sizeof(float),29*29,fp);
fclose(fp);
for (i = 0; i < (29 * 29 * NUM); i++)
layer1[i] = (1.0 - (float)image[i] / 256);
} else {
printf("FAIL! data/t10k-images-idx3-ubyte NOT FOUND!\n");
exit(1);
}
}
// Persist the first 10 entries of `final` (one image's class scores) to
// "out.res" as raw doubles. If the file cannot be opened, the function
// silently does nothing.
void output(double* final) {
  FILE* fp = fopen("out.res", "wb");
  if (fp != NULL) {
    fwrite(final, sizeof(double), 10, fp);
    fclose(fp);
  }
}
// Run one forward pass of the 5-layer network over NUM images: pick a usable
// GPU, load inputs (readIn) and weights (InitHostMem), copy everything to the
// device on `stream_app`, launch the four layer kernels, copy the 10*NUM
// class scores back, then print the per-image argmax and write the first
// image's scores to disk. `mutexapp` is unlocked as soon as all GPU work has
// been queued so other benchmark threads can proceed.
// NOTE(review): `flag` is overwritten with 1 below, making the
// hipDeviceSynchronize() branch dead code — confirm that is intentional.
// NOTE(review): none of the hipMalloc'd device buffers nor the malloc'd host
// buffers are freed before returning; this leaks if called more than once.
// NOTE(review): the host buffers are pageable (plain malloc), so the
// hipMemcpyAsync calls presumably cannot actually overlap — verify whether
// pinned allocation was intended.
void NeuralNetwork(hipStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
int x, y;
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
// Select the first device with compute capability >= 1.x.
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(hipSetDevice(dev));
// Input layer: NUM images of 29x29 float pixels, filled by readIn().
float* Layer1_Neurons_CPU = (float*)malloc(29 * 29 * NUM * sizeof(float));
/* (A hard-coded 29x29 sample image used to be inlined here; inputs are now
   loaded from the MNIST file by readIn() below.) */
readIn(Layer1_Neurons_CPU);
if (g_verbose) {
// ASCII-art dump of the loaded images: '0' for ink, ' ' for background.
for (y = 0; y < 29 * NUM; y++) {
if (!(y % 29))
printf("\n");
for (x = 0; x < 29; x++) {
if (Layer1_Neurons_CPU[y * 29 + x] < 0.5) {
printf("0");
} else
printf(" ");
}
printf("\n");
}
}
// Host weight arrays and device pointers for every layer.
float* Layer1_Neurons_GPU;
float Layer1_Weights_CPU[156];
float* Layer1_Weights_GPU;
float Layer2_Weights_CPU[7800];
float* Layer2_Weights_GPU;
float* Layer2_Neurons_GPU;
float Layer3_Weights_CPU[125100];
float* Layer3_Weights_GPU;
float* Layer3_Neurons_GPU;
float Layer4_Weights_CPU[1010];
float* Layer4_Weights_GPU;
float* Layer4_Neurons_GPU;
// Output layer: 10 class scores per image, zero-initialized on the host.
float* Layer5_Neurons_CPU = (float*)malloc(10 * NUM * sizeof(float));
for (x = 0; x < 10 * NUM; x++)
Layer5_Neurons_CPU[x] = 0;
float* Layer5_Neurons_GPU;
double* outputLayer;
// init input here
InitHostMem(Layer1_Weights_CPU, Layer2_Weights_CPU, Layer3_Weights_CPU,
Layer4_Weights_CPU);
// Allocate device memory inline. InitGPUMem() is not used because it takes
// the pointers by value and cannot return the allocations to the caller.
// InitGPUMem(Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU,Layer2_Weights_GPU,Layer3_Neurons_GPU,Layer3_Weights_GPU,Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer1_Neurons_GPU, sizeof(float) * 29 * 29 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer1_Weights_GPU, sizeof(float) * 156));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer2_Neurons_GPU,
sizeof(float) * 13 * 13 * 6 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer2_Weights_GPU, sizeof(float) * 7800));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer3_Neurons_GPU, sizeof(float) * 1250 * NUM));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer3_Weights_GPU, sizeof(float) * 125100));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer4_Neurons_GPU, sizeof(float) * 100 * NUM));
CUDA_SAFE_CALL(hipMalloc((void**)&Layer4_Weights_GPU, sizeof(float) * 1010));
CUDA_SAFE_CALL(
hipMalloc((void**)&Layer5_Neurons_GPU, sizeof(float) * 10 * NUM));
// NOTE(review): only the first 10 of these 10*NUM doubles are ever written
// to disk by output() below.
outputLayer = (double*)malloc(sizeof(double) * 10 * NUM);
// Copy inputs and all weights to the device on the application stream.
CUDA_SAFE_CALL(hipMemcpyAsync(Layer1_Neurons_GPU, Layer1_Neurons_CPU,
sizeof(float) * 29 * 29 * NUM,
hipMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(Layer1_Weights_GPU, Layer1_Weights_CPU,
sizeof(float) * 156, hipMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(Layer2_Weights_GPU, Layer2_Weights_CPU,
sizeof(float) * 7800, hipMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(Layer3_Weights_GPU, Layer3_Weights_CPU,
sizeof(float) * 125100, hipMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(Layer4_Weights_GPU, Layer4_Weights_CPU,
sizeof(float) * 1010, hipMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(Layer5_Neurons_GPU, Layer5_Neurons_CPU,
sizeof(float) * 10 * NUM,
hipMemcpyHostToDevice, stream_app));
printf("NUM=%d\n", NUM);
// Launch the four layer kernels back-to-back on the same stream; stream
// ordering makes each see the previous layer's output.
dim3 Layer1_Block(6, NUM, 1);
dim3 Layer1_Thread(13, 13);
hipLaunchKernelGGL(( executeFirstLayer), dim3(Layer1_Block), dim3(Layer1_Thread), 0, stream_app,
Layer1_Neurons_GPU, Layer1_Weights_GPU, Layer2_Neurons_GPU);
dim3 Layer2_Block(50, NUM, 1);
dim3 Layer2_Thread(5, 5);
hipLaunchKernelGGL(( executeSecondLayer), dim3(Layer2_Block), dim3(Layer2_Thread), 0, stream_app,
Layer2_Neurons_GPU, Layer2_Weights_GPU, Layer3_Neurons_GPU);
dim3 Layer3_Block(100, NUM, 1);
dim3 Layer3_Thread(1, 1);
hipLaunchKernelGGL(( executeThirdLayer), dim3(Layer3_Block), dim3(Layer3_Thread), 0, stream_app,
Layer3_Neurons_GPU, Layer3_Weights_GPU, Layer4_Neurons_GPU);
dim3 Layer4_Block(10, NUM, 1);
dim3 Layer4_Thread(1, 1);
hipLaunchKernelGGL(( executeFourthLayer), dim3(Layer4_Block), dim3(Layer4_Thread), 0, stream_app,
Layer4_Neurons_GPU, Layer4_Weights_GPU, Layer5_Neurons_GPU);
CUT_CHECK_ERROR("Kernel execution failed");
// Copy the final scores back to the host (same stream, so it waits for the
// kernels above).
CUDA_SAFE_CALL(hipMemcpyAsync(Layer5_Neurons_CPU, Layer5_Neurons_GPU,
sizeof(float) * 10 * NUM,
hipMemcpyDeviceToHost, stream_app));
// NOTE(review): this overwrite makes the else branch below unreachable.
flag = 1;
// All GPU work is queued; let the rest of the benchmark proceed.
pthread_mutex_unlock(mutexapp);
if (flag) {
cutilSafeCall(hipStreamSynchronize(stream_app));
} else {
cutilSafeCall(hipDeviceSynchronize());
}
// Per-image argmax: every 10 entries form one image's scores; print the
// best class index of the previous image at each boundary.
// NOTE(review): x is an int, so scores are truncated when stored/compared.
for (int a = 0; a < 10 * NUM; a++) {
outputLayer[a] = (double)Layer5_Neurons_CPU[a];
if (!(a % 10)) {
if (a)
printf("%d ", y);
x = outputLayer[a];
y = 0;
}
if (outputLayer[a] > x) {
x = outputLayer[a];
y = a % 10;
}
}
printf("%d\n", y);
output(outputLayer);
/* (A commented-out debug block used to dump the intermediate layer
   activations of layers 1-4 to *.neu files here.) */
}
| 924cb3ef6eb5c66772fc578c302898f7d364576c.cu | // includes, system
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// includes, project
#include <cutil.h>
#define NUM 28
// includes, kernels
#include "../benchmark_common.h"
#include "NN_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void computeGold(float*,
const float*,
const float*,
unsigned int,
unsigned int,
unsigned int);
void NeuralNetwork(cudaStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag);
unsigned g_verbose;
// unsigned NUM;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// int
// main(int argc, char** argv)
// Benchmark entry point for the NN workload: runs one full forward pass of
// the five-layer network on `stream_app` and always reports success.
// `mutexapp` and `flag` are forwarded unchanged to NeuralNetwork(). The
// original stand-alone main()'s command-line parsing has been retired; the
// batch size NUM is fixed at compile time.
int main_NN(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
  NeuralNetwork(stream_app, mutexapp, flag);
  return 0;
}
// Intended to allocate every device-side buffer used by the network.
// NOTE(review): the pointer parameters are passed BY VALUE, so the addresses
// produced by cudaMalloc are stored into local copies and lost on return —
// callers can never see these allocations (and the memory leaks). This is
// presumably why the call site in NeuralNetwork() is commented out and the
// allocations are done inline there instead. A real fix needs float**
// (or reference) parameters, i.e. an interface change.
void InitGPUMem(float* Layer1_Neurons_GPU,
float* Layer1_Weights_GPU,
float* Layer2_Neurons_GPU,
float* Layer2_Weights_GPU,
float* Layer3_Neurons_GPU,
float* Layer3_Weights_GPU,
float* Layer4_Neurons_GPU,
float* Layer4_Weights_GPU,
float* Layer5_Neurons_GPU) {
// Sizes: layer-1 input is 29x29 per image, NUM images per batch.
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer1_Neurons_GPU, sizeof(float) * 29 * 29 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer1_Weights_GPU, sizeof(float) * 156));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer2_Neurons_GPU,
sizeof(float) * 13 * 13 * 6 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer2_Weights_GPU, sizeof(float) * 7800));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer3_Neurons_GPU, sizeof(float) * 1250 * NUM));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer3_Weights_GPU, sizeof(float) * 125100));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer4_Neurons_GPU, sizeof(float) * 100 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer4_Weights_GPU, sizeof(float) * 1010));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer5_Neurons_GPU, sizeof(float) * 10 * NUM));
}
// Read exactly `count` floats from binary file `path` into `dst`.
// Returns true only if the file opened and the full count was read.
static bool loadWeightFile(const char* path, float* dst, size_t count) {
  FILE* f = fopen(path, "rb");
  if (f == NULL)
    return false;
  size_t got = fread(dst, sizeof(float), count, f);
  fclose(f);
  return got == count;
}

// Load the pre-trained weights of all four layers from ../NN/data/*.wei into
// the caller-provided host arrays (156, 7800, 125100 and 1010 floats
// respectively). Exits the process if any file is missing OR truncated —
// the original code ignored fread() return values, so a short file silently
// left part of an array uninitialized.
void InitHostMem(float* Layer1_Weights_CPU,
                 float* Layer2_Weights_CPU,
                 float* Layer3_Weights_CPU,
                 float* Layer4_Weights_CPU) {
  bool ok = true;
  ok = loadWeightFile("../NN/data/lw1.wei", Layer1_Weights_CPU, 156) && ok;
  ok = loadWeightFile("../NN/data/lw2.wei", Layer2_Weights_CPU, 7800) && ok;
  ok = loadWeightFile("../NN/data/lw3.wei", Layer3_Weights_CPU, 125100) && ok;
  ok = loadWeightFile("../NN/data/lw4.wei", Layer4_Weights_CPU, 1010) && ok;
  if (!ok) {
    printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
    exit(1);
  }
}
// Reverse the byte order of a 32-bit integer (big-endian <-> little-endian).
// The transformation is its own inverse. Used to decode the big-endian
// header fields of the MNIST idx3-ubyte file on little-endian hosts.
int swapEndianInt(int bEnum) {
  unsigned int u = (unsigned int)bEnum;
  u = ((u & 0x000000FFu) << 24) |
      ((u & 0x0000FF00u) << 8)  |
      ((u & 0x00FF0000u) >> 8)  |
      ((u & 0xFF000000u) >> 24);
  return (int)u;
}
// Read the first NUM images of the MNIST t10k-images-idx3-ubyte database into
// `layer1`. Each 28x28 source image is placed in the top-left of a 29x29
// frame (the extra row/column keeps the zeroed background value), then every
// pixel is mapped to a float via 1.0 - p/256 (background -> 1.0, ink -> ~0).
// Exits the process if the database file is missing.
// Fixes vs. the original: the file is opened in binary mode ("rb" instead of
// "rt", which would corrupt reads on Windows), the scratch buffers are freed
// (they used to leak), and the fourth header label now says "columns".
void readIn(float* layer1) {
  unsigned int i, j;
  unsigned int header = 0;
  // calloc zeroes the buffer, so the 29th row/column padding stays 0.
  unsigned char* image =
      (unsigned char*)calloc(29 * 29 * NUM, sizeof(unsigned char));
  FILE* fp = fopen("../NN/data/t10k-images-idx3-ubyte", "rb");
  if (fp) {
    // idx3-ubyte header: magic, item count, rows, cols — all big-endian.
    if (fread(&header, sizeof(int), 1, fp) == 1)
      printf("magic number = %d\n", swapEndianInt(header));
    if (fread(&header, sizeof(int), 1, fp) == 1)
      printf("number of items = %d\n", swapEndianInt(header));
    if (fread(&header, sizeof(int), 1, fp) == 1)
      printf("number of rows = %d\n", swapEndianInt(header));
    if (fread(&header, sizeof(int), 1, fp) == 1)
      printf("number of columns = %d\n", swapEndianInt(header));
    // Copy each 28-byte image row into the first 28 columns of a 29x29 frame.
    for (j = 0; j < NUM; j++) {
      for (i = 0; i < 28; i++)
        fread((image + i * 29 + j * 29 * 29), sizeof(char), 28, fp);
    }
    fclose(fp);
    for (i = 0; i < (29 * 29 * NUM); i++)
      layer1[i] = (1.0 - (float)image[i] / 256);
  } else {
    printf("FAIL! data/t10k-images-idx3-ubyte NOT FOUND!\n");
    exit(1);
  }
  free(image);  // was leaked before
}
// Persist the first 10 entries of `final` (one image's class scores) to
// "out.res" as raw doubles. If the file cannot be opened, the function
// silently does nothing.
void output(double* final) {
  FILE* fp = fopen("out.res", "wb");
  if (fp != NULL) {
    fwrite(final, sizeof(double), 10, fp);
    fclose(fp);
  }
}
// Run one forward pass of the 5-layer network over NUM images: pick a usable
// GPU, load inputs (readIn) and weights (InitHostMem), copy everything to the
// device on `stream_app`, launch the four layer kernels, copy the 10*NUM
// class scores back, then print the per-image argmax and write the first
// image's scores to disk. `mutexapp` is unlocked as soon as all GPU work has
// been queued so other benchmark threads can proceed.
// NOTE(review): `flag` is overwritten with 1 below, making the
// cudaThreadSynchronize() branch dead code — confirm that is intentional.
// NOTE(review): none of the cudaMalloc'd device buffers nor the malloc'd host
// buffers are freed before returning; this leaks if called more than once.
// NOTE(review): the host buffers are pageable (plain malloc), so the
// cudaMemcpyAsync calls presumably cannot actually overlap — verify whether
// pinned allocation was intended.
void NeuralNetwork(cudaStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
int x, y;
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
// Select the first device with compute capability >= 1.x.
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(cudaSetDevice(dev));
// Input layer: NUM images of 29x29 float pixels, filled by readIn().
float* Layer1_Neurons_CPU = (float*)malloc(29 * 29 * NUM * sizeof(float));
/* (A hard-coded 29x29 sample image used to be inlined here; inputs are now
   loaded from the MNIST file by readIn() below.) */
readIn(Layer1_Neurons_CPU);
if (g_verbose) {
// ASCII-art dump of the loaded images: '0' for ink, ' ' for background.
for (y = 0; y < 29 * NUM; y++) {
if (!(y % 29))
printf("\n");
for (x = 0; x < 29; x++) {
if (Layer1_Neurons_CPU[y * 29 + x] < 0.5) {
printf("0");
} else
printf(" ");
}
printf("\n");
}
}
// Host weight arrays and device pointers for every layer.
float* Layer1_Neurons_GPU;
float Layer1_Weights_CPU[156];
float* Layer1_Weights_GPU;
float Layer2_Weights_CPU[7800];
float* Layer2_Weights_GPU;
float* Layer2_Neurons_GPU;
float Layer3_Weights_CPU[125100];
float* Layer3_Weights_GPU;
float* Layer3_Neurons_GPU;
float Layer4_Weights_CPU[1010];
float* Layer4_Weights_GPU;
float* Layer4_Neurons_GPU;
// Output layer: 10 class scores per image, zero-initialized on the host.
float* Layer5_Neurons_CPU = (float*)malloc(10 * NUM * sizeof(float));
for (x = 0; x < 10 * NUM; x++)
Layer5_Neurons_CPU[x] = 0;
float* Layer5_Neurons_GPU;
double* outputLayer;
// init input here
InitHostMem(Layer1_Weights_CPU, Layer2_Weights_CPU, Layer3_Weights_CPU,
Layer4_Weights_CPU);
// Allocate device memory inline. InitGPUMem() is not used because it takes
// the pointers by value and cannot return the allocations to the caller.
// InitGPUMem(Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU,Layer2_Weights_GPU,Layer3_Neurons_GPU,Layer3_Weights_GPU,Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer1_Neurons_GPU, sizeof(float) * 29 * 29 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer1_Weights_GPU, sizeof(float) * 156));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer2_Neurons_GPU,
sizeof(float) * 13 * 13 * 6 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer2_Weights_GPU, sizeof(float) * 7800));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer3_Neurons_GPU, sizeof(float) * 1250 * NUM));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer3_Weights_GPU, sizeof(float) * 125100));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer4_Neurons_GPU, sizeof(float) * 100 * NUM));
CUDA_SAFE_CALL(cudaMalloc((void**)&Layer4_Weights_GPU, sizeof(float) * 1010));
CUDA_SAFE_CALL(
cudaMalloc((void**)&Layer5_Neurons_GPU, sizeof(float) * 10 * NUM));
// NOTE(review): only the first 10 of these 10*NUM doubles are ever written
// to disk by output() below.
outputLayer = (double*)malloc(sizeof(double) * 10 * NUM);
// Copy inputs and all weights to the device on the application stream.
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer1_Neurons_GPU, Layer1_Neurons_CPU,
sizeof(float) * 29 * 29 * NUM,
cudaMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer1_Weights_GPU, Layer1_Weights_CPU,
sizeof(float) * 156, cudaMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer2_Weights_GPU, Layer2_Weights_CPU,
sizeof(float) * 7800, cudaMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer3_Weights_GPU, Layer3_Weights_CPU,
sizeof(float) * 125100, cudaMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer4_Weights_GPU, Layer4_Weights_CPU,
sizeof(float) * 1010, cudaMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer5_Neurons_GPU, Layer5_Neurons_CPU,
sizeof(float) * 10 * NUM,
cudaMemcpyHostToDevice, stream_app));
printf("NUM=%d\n", NUM);
// Launch the four layer kernels back-to-back on the same stream; stream
// ordering makes each see the previous layer's output.
dim3 Layer1_Block(6, NUM, 1);
dim3 Layer1_Thread(13, 13);
executeFirstLayer<<<Layer1_Block, Layer1_Thread, 0, stream_app>>>(
Layer1_Neurons_GPU, Layer1_Weights_GPU, Layer2_Neurons_GPU);
dim3 Layer2_Block(50, NUM, 1);
dim3 Layer2_Thread(5, 5);
executeSecondLayer<<<Layer2_Block, Layer2_Thread, 0, stream_app>>>(
Layer2_Neurons_GPU, Layer2_Weights_GPU, Layer3_Neurons_GPU);
dim3 Layer3_Block(100, NUM, 1);
dim3 Layer3_Thread(1, 1);
executeThirdLayer<<<Layer3_Block, Layer3_Thread, 0, stream_app>>>(
Layer3_Neurons_GPU, Layer3_Weights_GPU, Layer4_Neurons_GPU);
dim3 Layer4_Block(10, NUM, 1);
dim3 Layer4_Thread(1, 1);
executeFourthLayer<<<Layer4_Block, Layer4_Thread, 0, stream_app>>>(
Layer4_Neurons_GPU, Layer4_Weights_GPU, Layer5_Neurons_GPU);
CUT_CHECK_ERROR("Kernel execution failed");
// Copy the final scores back to the host (same stream, so it waits for the
// kernels above).
CUDA_SAFE_CALL(cudaMemcpyAsync(Layer5_Neurons_CPU, Layer5_Neurons_GPU,
sizeof(float) * 10 * NUM,
cudaMemcpyDeviceToHost, stream_app));
// NOTE(review): this overwrite makes the else branch below unreachable.
flag = 1;
// All GPU work is queued; let the rest of the benchmark proceed.
pthread_mutex_unlock(mutexapp);
if (flag) {
cutilSafeCall(cudaStreamSynchronize(stream_app));
} else {
cutilSafeCall(cudaThreadSynchronize());
}
// Per-image argmax: every 10 entries form one image's scores; print the
// best class index of the previous image at each boundary.
// NOTE(review): x is an int, so scores are truncated when stored/compared.
for (int a = 0; a < 10 * NUM; a++) {
outputLayer[a] = (double)Layer5_Neurons_CPU[a];
if (!(a % 10)) {
if (a)
printf("%d ", y);
x = outputLayer[a];
y = 0;
}
if (outputLayer[a] > x) {
x = outputLayer[a];
y = a % 10;
}
}
printf("%d\n", y);
output(outputLayer);
/* (A commented-out debug block used to dump the intermediate layer
   activations of layers 1-4 to *.neu files here.) */
}
|
4b41a4a7643fc0c09a5c808875771e0b74beb0af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Number of iterations in the kernel
#define ITER_MULTIPLIER 4
// Number of tests, number of streams doubles for each test. That is,
// there will be 2^N_TESTS streams in the last test
#define N_TESTS 4
// Information of stream for simple domain decomposition
// Per-stream slice of the simple 1-D domain decomposition: each stream owns
// the contiguous element range [start, start + len) of the vectors.
struct stream {
hipStream_t strm; // Stream that processes this part
int len; // Length (element count) of the part for this stream
int start; // Offset to the start of the part
};
// Kernel for vector summation
// Element-wise repeated vector sum: C[i] ends up as
// ITER_MULTIPLIER * iterations * (A[i] + B[i]), computed by explicit
// accumulation so the kernel's runtime scales with `iterations` (this is a
// benchmark workload, not a fast path). A grid-stride loop makes any launch
// configuration cover all N elements without reading past the buffers.
__global__ void vector_add(double *C, const double *A, const double *B,
                           int N, int iterations)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    const int reps = ITER_MULTIPLIER * iterations;

    for (int i = first; i < N; i += stride) {
        // Accumulate in a register; same add sequence (hence identical
        // result) as repeatedly updating C[i] in global memory.
        double acc = 0;
        for (int j = 0; j < reps; j++) {
            acc += A[i] + B[i];
        }
        C[i] = acc;
    }
}
// Routine for stream test
// Timed multi-stream run of the vector summation: for each of the `nstreams`
// parts (described by s[i].start / s[i].len), asynchronously copy that slice
// of A and B to the device, launch vector_add on it, and copy the slice of C
// back, all enqueued in that part's own stream so independent slices can
// overlap transfers with compute. The elapsed GPU time in milliseconds is
// returned through *gputime; `tib` is the thread-block size.
// NOTE(review): the timing events are recorded on the default stream;
// correctness of the measurement relies on legacy default-stream
// synchronization with the (blocking) streams created by create_streams —
// confirm if streams are ever made non-blocking.
void streamtest(double *hC, const double *hA, const double *hB,
double *dC, const double *dA, const double *dB,
stream *s, int nstreams, float *gputime, int tib,
int iterations)
{
// Start the timing interval before any per-stream work is enqueued.
hipEvent_t start, stop;
CUDA_CHECK( hipEventCreate(&start) );
CUDA_CHECK( hipEventCreate(&stop) );
CUDA_CHECK( hipEventRecord(start) );
for (int i = 0; i < nstreams; ++i) {
// Copy - kernel execution - copy sequence for each stream.
//
// Each stream copies its own part of the input data to the GPU
// starting from address &(dA[sidx]); the block size is slen.
int sidx = s[i].start;
int slen = s[i].len;
CUDA_CHECK( hipMemcpyAsync((void *)&(dA[sidx]), (void *)&(hA[sidx]),
sizeof(double) * slen,
hipMemcpyHostToDevice, s[i].strm) );
CUDA_CHECK( hipMemcpyAsync((void *)&(dB[sidx]), (void *)&(hB[sidx]),
sizeof(double) * slen,
hipMemcpyHostToDevice, s[i].strm) );
// Enough blocks of `tib` threads to cover this slice.
dim3 grid, threads;
grid.x = (slen + tib - 1) / tib;
threads.x = tib;
hipLaunchKernelGGL(( vector_add), dim3(grid), dim3(threads), 0, s[i].strm, &(dC[sidx]), &(dA[sidx]),
&(dB[sidx]), slen,
iterations);
CUDA_CHECK( hipMemcpyAsync((void *)&(hC[sidx]), (void *)&(dC[sidx]),
sizeof(double) * slen,
hipMemcpyDeviceToHost, s[i].strm) );
}
// Close the timing interval and compute the elapsed time (ms).
CUDA_CHECK( hipEventRecord(stop) );
CUDA_CHECK( hipEventSynchronize(stop) );
CHECK_ERROR_MSG("Stream test failed");
CUDA_CHECK( hipEventElapsedTime(gputime, start, stop) );
CUDA_CHECK( hipEventDestroy(start) );
CUDA_CHECK( hipEventDestroy(stop) );
}
// Routine for default stream reference
// Reference (baseline) run on the default stream: synchronously copy the
// inputs, launch one vector_add over the whole vector, copy the result back,
// and return the elapsed GPU time in milliseconds through *gputime, measured
// with events. `tib` is the thread-block size. The multi-stream tests are
// compared against this timing.
void default_stream(double *hC, const double *hA, const double *hB,
double *dC, const double *dA, const double *dB,
int N, float *gputime, int tib, int iterations)
{
// Events bracketing the whole copy-compute-copy sequence.
hipEvent_t start, stop;
CUDA_CHECK( hipEventCreate(&start) );
CUDA_CHECK( hipEventCreate(&stop) );
CUDA_CHECK( hipEventRecord(start) );
// Non-asynchronous copies: the baseline deliberately does not overlap
// transfers with computation.
CUDA_CHECK( hipMemcpy((void *)dA, (void *)hA,
sizeof(double) * N,
hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy((void *)dB, (void *)hB,
sizeof(double) * N,
hipMemcpyHostToDevice) );
dim3 grid, threads;
grid.x = (N + tib - 1) / tib;
threads.x = tib;
hipLaunchKernelGGL(( vector_add), dim3(grid), dim3(threads), 0, 0, dC, dA, dB, N, iterations);
// Fixed: the D2H copy used hipMemcpyAsync despite the "non-asynchronous"
// baseline intent. A blocking copy is equivalent here because the event
// record/sync on the null stream below waited for the async copy anyway.
CUDA_CHECK( hipMemcpy((void *)hC, (void *)dC,
sizeof(double) * N,
hipMemcpyDeviceToHost) );
// Close the timing interval and compute the elapsed time (ms).
CUDA_CHECK( hipEventRecord(stop) );
CUDA_CHECK( hipEventSynchronize(stop) );
CHECK_ERROR_MSG("Default stream test failed");
CUDA_CHECK( hipEventElapsedTime(gputime, start, stop) );
CUDA_CHECK( hipEventDestroy(start) );
CUDA_CHECK( hipEventDestroy(stop) );
}
// Create the streams and compute the decomposition
// Create `nstreams` streams and partition a vector of `vecsize` elements into
// contiguous, non-overlapping parts, one per stream. The first
// (vecsize % nstreams) streams receive one extra element so the whole vector
// is covered exactly once.
// BUG FIX: the previous code computed s[i].start = s[i-1].start + (length of
// stream i) instead of adding the length of stream i-1. Whenever vecsize was
// not divisible by nstreams this made slices overlap by one element at the
// boundary where the extra elements run out, and left the tail of the vector
// unassigned (never processed by any stream).
void create_streams(int nstreams, int vecsize, stream **strm)
{
    *strm = new stream[nstreams];
    stream *s = *strm;
    for(int i = 0; i < nstreams; i++) {
        CUDA_CHECK( hipStreamCreate(&s[i].strm) );
    }

    int base = vecsize / nstreams;
    int remainder = vecsize % nstreams;
    int offset = 0;
    for(int i = 0; i < nstreams; i++) {
        s[i].start = offset;
        // Streams 0 .. remainder-1 absorb the leftover elements.
        s[i].len = base + (i < remainder ? 1 : 0);
        offset += s[i].len;
    }
    // Invariant: offset == vecsize here, so the parts tile the vector exactly.
}
// Delete the streams
// Tear down a decomposition built by create_streams(): destroy each stream,
// then release the descriptor array itself.
void destroy_streams(int nstreams, stream *s)
{
    for(int i = nstreams - 1; i >= 0; i--) {
        CUDA_CHECK( hipStreamDestroy(s[i].strm) );
    }
    delete[] s;
}
// Driver: compares a default-stream reference run against runs with
// 1, 2, 4, ... 2^(N_TESTS-1) streams on a vector of length N (argv[1]).
int main(int argc, char *argv[])
{
    const int ThreadsInBlock = 512;
    int iterations;
    double *dA, *dB, *dC;          // device vectors
    double *hA, *hB, *hC;          // page-locked host vectors
    double ref_value;
    float gputime_ref, gputimes[N_TESTS];
    stream *s;
    hipDeviceProp_t prop;

    if (argc < 2) {
        printf("Usage: %s N\nwhere N is the length of the vector.\n",
               argv[0]);
        exit(EXIT_FAILURE);
    }
    int N = atoi(argv[1]);

    // Determine the number of available multiprocessors on the device.
    // It is used for a coarse adjustment of the computation part of
    // this test. (Fix: this call was previously unchecked.)
    CUDA_CHECK( hipGetDeviceProperties(&prop, 0) );
    iterations = (prop.multiProcessorCount + 1) / 2;

    // Host memory allocation (page-locked, required for async copies)
    CUDA_CHECK( hipHostMalloc((void**)&hA, sizeof(double) * N) );
    CUDA_CHECK( hipHostMalloc((void**)&hB, sizeof(double) * N) );
    CUDA_CHECK( hipHostMalloc((void**)&hC, sizeof(double) * N) );

    for(int i = 0; i < N; ++i) {
        hA[i] = 1.0;
        hB[i] = 2.0;
    }
    // Expected per-element result: (1.0 + 2.0) * ITER_MULTIPLIER * iterations
    ref_value = 3.0 * ITER_MULTIPLIER * iterations;

    CUDA_CHECK( hipMalloc((void**)&dA, sizeof(double) * N) );
    CUDA_CHECK( hipMalloc((void**)&dB, sizeof(double) * N) );
    CUDA_CHECK( hipMalloc((void**)&dC, sizeof(double) * N) );

    // Check the timings of default stream first
    default_stream(hC, hA, hB, dC, dA, dB, N, &gputime_ref, ThreadsInBlock,
                   iterations);

    // Here we loop over the test. On each iteration, we double the number
    // of streams.
    for(int strm = 0; strm < N_TESTS; strm++) {
        int stream_count = 1<<strm;
        create_streams(stream_count, N, &s);
        streamtest(hC, hA, hB, dC, dA, dB, s, stream_count, &gputimes[strm],
                   ThreadsInBlock, iterations);
        destroy_streams(stream_count, s);
    }

    CUDA_CHECK( hipFree((void*)dA) );
    CUDA_CHECK( hipFree((void*)dB) );
    CUDA_CHECK( hipFree((void*)dC) );

    // Sanity check against the reference value. Note: the truncating
    // integer accumulation only flags gross errors, not rounding noise.
    int errorsum = 0;
    for (int i = 0; i < N; i++) {
        errorsum += hC[i] - ref_value;
    }
    printf("Error sum = %i\n", errorsum);
    printf("Time elapsed for reference run: %f\n", gputime_ref / 1000.);
    for(int i = 0; i < N_TESTS; i++) {
        printf("Time elapsed for test %2i: %f\n", 1<<i,
               gputimes[i] / 1000.);
    }

    // Release the page-locked host buffers
    CUDA_CHECK( hipHostFree((void*)hA) );
    CUDA_CHECK( hipHostFree((void*)hB) );
    CUDA_CHECK( hipHostFree((void*)hC) );

    return 0;
}
| 4b41a4a7643fc0c09a5c808875771e0b74beb0af.cu | #include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Number of iterations in the kernel
#define ITER_MULTIPLIER 4
// Number of tests, number of streams doubles for each test. That is,
// there will be 2^N_TESTS streams in the last test
#define N_TESTS 4
// Information of stream for simple domain decomposition: each stream
// owns one contiguous slice [start, start + len) of the vectors.
struct stream {
    cudaStream_t strm; // Stream handle used for copies and kernel launches
    int len;           // Length of the part for this stream
    int start;         // Offset to the start of the part
};
// Kernel for vector summation: accumulates C[i] = sum of (A[i] + B[i])
// over ITER_MULTIPLIER * iterations repetitions, i.e. C = k * (A + B)
// with k = ITER_MULTIPLIER * iterations. The repeated inner loop only
// exists to give the kernel a tunable amount of compute work.
// Uses a grid-stride loop, so any grid size covers all N elements.
__global__ void vector_add(double *C, const double *A, const double *B,
                           int N, int iterations)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // Do not try to access past the allocated memory
    for (int i = idx; i < N; i += stride) {
        C[i] = 0;
        for (int j = 0; j < ITER_MULTIPLIER * iterations; j++) {
            C[i] += A[i] + B[i];
        }
    }
}
// Routine for stream test: the vector is split into nstreams contiguous
// parts (precomputed in s[] by create_streams). For each part the H2D
// copies, kernel launch and D2H copy are issued asynchronously into that
// part's stream, so copies and compute of different parts can overlap.
// Host buffers must be page-locked for the async copies to be truly
// asynchronous. Elapsed time for the whole pipeline (ms) -> *gputime.
void streamtest(double *hC, const double *hA, const double *hB,
                double *dC, const double *dA, const double *dB,
                stream *s, int nstreams, float *gputime, int tib,
                int iterations)
{
    // Events bracketing all the stream work for timing
    cudaEvent_t start, stop;
    CUDA_CHECK( cudaEventCreate(&start) );
    CUDA_CHECK( cudaEventCreate(&stop) );
    CUDA_CHECK( cudaEventRecord(start) );

    for (int i = 0; i < nstreams; ++i) {
        // Copy - kernel execution - copy sequence for each stream.
        //
        // Each stream copies its own part of the input data
        // to the GPU starting from address &(dA[sidx]).
        // Size of the block is slen (see variables below).
        int sidx = s[i].start;
        int slen = s[i].len;

        CUDA_CHECK( cudaMemcpyAsync((void *)&(dA[sidx]), (void *)&(hA[sidx]),
                                    sizeof(double) * slen,
                                    cudaMemcpyHostToDevice, s[i].strm) );
        CUDA_CHECK( cudaMemcpyAsync((void *)&(dB[sidx]), (void *)&(hB[sidx]),
                                    sizeof(double) * slen,
                                    cudaMemcpyHostToDevice, s[i].strm) );

        // Grid sized to cover this part only (slen elements)
        dim3 grid, threads;
        grid.x = (slen + tib - 1) / tib;
        threads.x = tib;

        vector_add<<<grid, threads, 0, s[i].strm>>>(&(dC[sidx]), &(dA[sidx]),
                                                    &(dB[sidx]), slen,
                                                    iterations);

        CUDA_CHECK( cudaMemcpyAsync((void *)&(hC[sidx]), (void *)&(dC[sidx]),
                                    sizeof(double) * slen,
                                    cudaMemcpyDeviceToHost, s[i].strm) );
    }

    // Record stop after all streams' work; synchronizing on it waits for
    // every queued operation above, then the elapsed time is read out.
    CUDA_CHECK( cudaEventRecord(stop) );
    CUDA_CHECK( cudaEventSynchronize(stop) );
    CHECK_ERROR_MSG("Stream test failed");
    CUDA_CHECK( cudaEventElapsedTime(gputime, start, stop) );
    CUDA_CHECK( cudaEventDestroy(start) );
    CUDA_CHECK( cudaEventDestroy(stop) );
}
// Reference measurement: the same copy/compute/copy sequence as the
// stream test, but issued entirely in the default stream with blocking
// host-to-device copies. Elapsed time (ms) is returned in *gputime.
void default_stream(double *hC, const double *hA, const double *hB,
                    double *dC, const double *dA, const double *dB,
                    int N, float *gputime, int tib, int iterations)
{
    // Events bracketing the whole sequence for timing
    cudaEvent_t start, stop;
    CUDA_CHECK( cudaEventCreate(&start) );
    CUDA_CHECK( cudaEventCreate(&stop) );
    CUDA_CHECK( cudaEventRecord(start) );

    // Blocking host-to-device copies of both input vectors
    CUDA_CHECK( cudaMemcpy((void *)dA, (void *)hA,
                           sizeof(double) * N,
                           cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy((void *)dB, (void *)hB,
                           sizeof(double) * N,
                           cudaMemcpyHostToDevice) );

    // Enough tib-sized blocks to cover all N elements
    dim3 grid((N + tib - 1) / tib);
    dim3 threads(tib);
    vector_add<<<grid, threads>>>(dC, dA, dB, N, iterations);

    // Result copy back in the default stream; completion is guaranteed
    // by the event synchronization below.
    CUDA_CHECK( cudaMemcpyAsync((void *)hC, (void *)dC,
                                sizeof(double) * N,
                                cudaMemcpyDeviceToHost) );

    // Close the timing interval and read out the elapsed time
    CUDA_CHECK( cudaEventRecord(stop) );
    CUDA_CHECK( cudaEventSynchronize(stop) );
    CHECK_ERROR_MSG("Default stream test failed");
    CUDA_CHECK( cudaEventElapsedTime(gputime, start, stop) );
    CUDA_CHECK( cudaEventDestroy(start) );
    CUDA_CHECK( cudaEventDestroy(stop) );
}
// Create the streams and compute the 1-D domain decomposition.
// The first (vecsize % nstreams) parts receive one extra element so
// that the whole vector of length vecsize is covered exactly once.
void create_streams(int nstreams, int vecsize, stream **strm)
{
    *strm = new stream[nstreams];
    stream *s = *strm;
    for(int i = 0; i < nstreams; i++) {
        CUDA_CHECK( cudaStreamCreate(&s[i].strm) );
    }

    s[0].start = 0;
    s[0].len = vecsize / nstreams;
    s[0].len += vecsize % nstreams ? 1 : 0;
    for(int i = 1; i < nstreams; i++) {
        int len = vecsize / nstreams;
        if(i < vecsize % nstreams) {
            len++;
        }
        s[i].len = len;
        // BUG FIX: each part starts where the previous part ends, i.e.
        // previous start + previous length. The old code added this
        // part's *own* length instead, which made consecutive parts
        // overlap and left the tail of the vector unprocessed whenever
        // vecsize is not divisible by nstreams.
        s[i].start = s[i-1].start + s[i-1].len;
    }
}
// Tear down the streams created by create_streams() and release the
// bookkeeping array.
void destroy_streams(int nstreams, stream *s)
{
    for(int i = nstreams - 1; i >= 0; i--) {
        CUDA_CHECK( cudaStreamDestroy(s[i].strm) );
    }
    delete[] s;
}
// Driver: compares a default-stream reference run against runs with
// 1, 2, 4, ... 2^(N_TESTS-1) streams on a vector of length N (argv[1]).
int main(int argc, char *argv[])
{
    const int ThreadsInBlock = 512;
    int iterations;
    double *dA, *dB, *dC;          // device vectors
    double *hA, *hB, *hC;          // page-locked host vectors
    double ref_value;
    float gputime_ref, gputimes[N_TESTS];
    stream *s;
    cudaDeviceProp prop;

    if (argc < 2) {
        printf("Usage: %s N\nwhere N is the length of the vector.\n",
               argv[0]);
        exit(EXIT_FAILURE);
    }
    int N = atoi(argv[1]);

    // Determine the number of available multiprocessors on the device.
    // It is used for a coarse adjustment of the computation part of
    // this test. (Fix: this call was previously unchecked.)
    CUDA_CHECK( cudaGetDeviceProperties(&prop, 0) );
    iterations = (prop.multiProcessorCount + 1) / 2;

    // Host memory allocation (page-locked, required for async copies)
    CUDA_CHECK( cudaMallocHost((void**)&hA, sizeof(double) * N) );
    CUDA_CHECK( cudaMallocHost((void**)&hB, sizeof(double) * N) );
    CUDA_CHECK( cudaMallocHost((void**)&hC, sizeof(double) * N) );

    for(int i = 0; i < N; ++i) {
        hA[i] = 1.0;
        hB[i] = 2.0;
    }
    // Expected per-element result: (1.0 + 2.0) * ITER_MULTIPLIER * iterations
    ref_value = 3.0 * ITER_MULTIPLIER * iterations;

    CUDA_CHECK( cudaMalloc((void**)&dA, sizeof(double) * N) );
    CUDA_CHECK( cudaMalloc((void**)&dB, sizeof(double) * N) );
    CUDA_CHECK( cudaMalloc((void**)&dC, sizeof(double) * N) );

    // Check the timings of default stream first
    default_stream(hC, hA, hB, dC, dA, dB, N, &gputime_ref, ThreadsInBlock,
                   iterations);

    // Here we loop over the test. On each iteration, we double the number
    // of streams.
    for(int strm = 0; strm < N_TESTS; strm++) {
        int stream_count = 1<<strm;
        create_streams(stream_count, N, &s);
        streamtest(hC, hA, hB, dC, dA, dB, s, stream_count, &gputimes[strm],
                   ThreadsInBlock, iterations);
        destroy_streams(stream_count, s);
    }

    CUDA_CHECK( cudaFree((void*)dA) );
    CUDA_CHECK( cudaFree((void*)dB) );
    CUDA_CHECK( cudaFree((void*)dC) );

    // Sanity check against the reference value. Note: the truncating
    // integer accumulation only flags gross errors, not rounding noise.
    int errorsum = 0;
    for (int i = 0; i < N; i++) {
        errorsum += hC[i] - ref_value;
    }
    printf("Error sum = %i\n", errorsum);
    printf("Time elapsed for reference run: %f\n", gputime_ref / 1000.);
    for(int i = 0; i < N_TESTS; i++) {
        printf("Time elapsed for test %2i: %f\n", 1<<i,
               gputimes[i] / 1000.);
    }

    // Release the page-locked host buffers
    CUDA_CHECK( cudaFreeHost((void*)hA) );
    CUDA_CHECK( cudaFreeHost((void*)hB) );
    CUDA_CHECK( cudaFreeHost((void*)hC) );

    return 0;
}
|
5ad3cdfdb0551a118fd85bcbc07aa3f1f2211113.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void profileSubphaseComputeRestriction_kernel() {} | 5ad3cdfdb0551a118fd85bcbc07aa3f1f2211113.cu | #include "includes.h"
__global__ void profileSubphaseComputeRestriction_kernel() {} |
58ecc1fd4cc74c5a4407e4606a230a07842e6cdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "separableConvolution.h"
#include "hip/hip_runtime.h"
#include "book.h"
#include <math.h>
#define KERNEL_RADIUS 31
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
__constant__ float c_Kernel[ KERNEL_LENGTH ];
// Upload a KERNEL_LENGTH-tap filter into the constant-memory array
// c_Kernel used by the convolution kernels in this file. Must be called
// before launching a pass whose filter differs from the previous one.
void setConvolutionKernel_63( float *h_Kernel )
{
    // Fix: check the copy (consistent with the HANDLE_ERROR usage in the
    // rest of this file) — a silent failure here would make every later
    // convolution produce garbage with no diagnostic.
    // NOTE(review): newer HIP versions prefer wrapping the symbol as
    // HIP_SYMBOL(c_Kernel) — confirm against the HIP toolchain in use.
    HANDLE_ERROR( hipMemcpyToSymbol( c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float) ) );
}
// how many threads per block in x (total num threads: x*y)
#define ROWS_BLOCKDIM_X 32
// how many threads per block in y
#define ROWS_BLOCKDIM_Y 8
// how many pixels in x are convolved by each thread
#define ROWS_RESULT_STEPS 8
// these are the border pixels (loaded to support the kernel width for processing)
// the effective border width is ROWS_HALO_STEPS * ROWS_BLOCKDIM_X, which has to be
// larger or equal to the kernel radius to work
#define ROWS_HALO_STEPS 1
#define COLUMNS_BLOCKDIM_X 8
#define COLUMNS_BLOCKDIM_Y 32
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
#define DEPTH_BLOCKDIM_X 8
#define DEPTH_BLOCKDIM_Z 32
#define DEPTH_RESULT_STEPS 8
#define DEPTH_HALO_STEPS 1
// X (row) pass of the separable convolution with the KERNEL_LENGTH-tap
// filter held in constant memory (c_Kernel). Each block stages
// ROWS_RESULT_STEPS + 2*ROWS_HALO_STEPS tiles of ROWS_BLOCKDIM_X pixels
// for ROWS_BLOCKDIM_Y rows of one z-slice into shared memory, then each
// thread convolves ROWS_RESULT_STEPS pixels.
// Border policy selected by 'outofbounds':
//   0 -> zero padding, 1 -> constant 'outofboundsvalue',
//   otherwise clamp to the first/last pixel of the line.
__global__ void convolutionX_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];

    // Global coordinates of this thread's first (left-halo) pixel;
    // baseX can be negative for the leftmost block.
    const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z;

    // Offsets (relative to the shifted d_Src below) of the clamped
    // first/last pixel of this image line, used by the clamp policy.
    const int firstPixelInLine = ROWS_BLOCKDIM_X * ROWS_HALO_STEPS - threadIdx.x;
    const int lastPixelInLine = imageW - baseX - 1;

    // set the input and output arrays to the right offset (actually the output is not at the right offset, but this is corrected later)
    d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
    d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;

    // Load main data
    // Start copying after the ROWS_HALO_STEPS, only the original data that will be convolved
    #pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
    }

    // Load left halo
    // If the data fetched is outside of the image (note: baseX can be <0 for the first block), apply the selected border policy
    #pragma unroll
    for (int i = 0; i < ROWS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ firstPixelInLine ];
    }

    // Load right halo
    #pragma unroll
    for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
    }

    // Compute and store results
    __syncthreads();

    // this pixel is not part of the image and does not need to be convolved;
    // note the early exit comes AFTER the barrier so that every thread of
    // the block participates in __syncthreads() above.
    if ( baseY >= imageH )
        return;

    #pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        if (imageW - baseX > i * ROWS_BLOCKDIM_X)
        {
            float sum = 0;

            #pragma unroll
            for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
            {
                sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
            }

            d_Dst[i * ROWS_BLOCKDIM_X] = sum;
        }
    }
}
// Y (column) pass of the separable convolution; same structure as the
// x-pass but the stride between neighbouring samples is one image row
// (imageW). The +1 padding of the inner shared-memory dimension
// presumably avoids shared-memory bank conflicts — standard trick,
// not asserted here.
// Border policy via 'outofbounds': 0 -> zero, 1 -> outofboundsvalue,
// otherwise clamp to the first/last pixel of the column.
__global__ void convolutionY_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];

    // Offset to the upper halo edge
    const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
    const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z;

    // Relative offsets of the clamped first/last pixel of this column
    const int firstPixelInLine = (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS - threadIdx.y) * imageW;
    const int lastPixelInLine = (imageH - baseY - 1) * imageW;

    d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
    d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;

    // Main data
    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ lastPixelInLine ];
    }

    // Upper halo
    #pragma unroll
    for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ firstPixelInLine ];
    }

    // Lower halo
    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ lastPixelInLine ];
    }

    // Compute and store results
    __syncthreads();

    // this pixel is not part of the image and does not need to be convolved;
    // the early exit comes AFTER the barrier so every thread reaches it
    if ( baseX >= imageW )
        return;

    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        if (imageH - baseY > i * COLUMNS_BLOCKDIM_Y)
        {
            float sum = 0;

            #pragma unroll
            for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
            {
                sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
            }

            d_Dst[i * COLUMNS_BLOCKDIM_Y * imageW] = sum;
        }
    }
}
// Z (depth) pass of the separable convolution; same structure as the
// other passes but the stride between neighbouring samples is one whole
// z-slice (imageW * imageH). Threads are laid out (x, 1, z).
// Border policy via 'outofbounds': 0 -> zero, 1 -> outofboundsvalue,
// otherwise clamp to the first/last slice.
__global__ void convolutionZ_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    // here it is [x][z], we leave out y as it has a size of 1
    __shared__ float s_Data[DEPTH_BLOCKDIM_X][(DEPTH_RESULT_STEPS + 2 * DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + 1];

    // Offset to the upper halo edge
    const int baseX = blockIdx.x * DEPTH_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y;
    const int baseZ = (blockIdx.z * DEPTH_RESULT_STEPS - DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + threadIdx.z;

    // Relative offsets of the clamped first/last slice for this column
    const int firstPixelInLine = (DEPTH_BLOCKDIM_Z * DEPTH_HALO_STEPS - threadIdx.z) * imageW * imageH;
    const int lastPixelInLine = (imageD - baseZ - 1) * imageW * imageH;

    d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
    d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;

    // Main data
    #pragma unroll
    for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ lastPixelInLine ];
    }

    // Upper halo
    #pragma unroll
    for (int i = 0; i < DEPTH_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ firstPixelInLine ];
    }

    // Lower halo
    #pragma unroll
    for (int i = DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS + DEPTH_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ lastPixelInLine ];
    }

    // Compute and store results
    __syncthreads();

    // this pixel is not part of the image and does not need to be convolved;
    // the early exit comes AFTER the barrier so every thread reaches it
    if ( baseX >= imageW )
        return;

    #pragma unroll
    for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
    {
        if (imageD - baseZ > i * DEPTH_BLOCKDIM_Z)
        {
            float sum = 0;

            #pragma unroll
            for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
            {
                sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z + j];
            }

            d_Dst[i * DEPTH_BLOCKDIM_Z * imageW * imageH] = sum;
        }
    }
}
// Host-side launcher for the x (row) convolution pass over the whole
// imageW x imageH x imageD volume.
void convolutionX_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    // Each block outputs ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X pixels in x
    // and ROWS_BLOCKDIM_Y rows in y; round the grid up so partial blocks
    // at the borders are covered; one z-slice per grid layer.
    const int xPerBlock = ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X;
    dim3 blocks( imageW / xPerBlock + imin( 1, imageW % xPerBlock ),
                 imageH / ROWS_BLOCKDIM_Y + imin( 1, imageH % ROWS_BLOCKDIM_Y ),
                 imageD );
    dim3 threads( ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y, 1 );

    hipLaunchKernelGGL( convolutionX_63_Kernel, blocks, threads, 0, 0,
                        d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
// Host-side launcher for the y (column) convolution pass over the whole
// imageW x imageH x imageD volume.
void convolutionY_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    // Each block outputs COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y pixels
    // in y and COLUMNS_BLOCKDIM_X columns in x; round the grid up so
    // partial blocks at the borders are covered; one z-slice per layer.
    const int yPerBlock = COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y;
    dim3 blocks( imageW / COLUMNS_BLOCKDIM_X + imin( 1, imageW % COLUMNS_BLOCKDIM_X ),
                 imageH / yPerBlock + imin( 1, imageH % yPerBlock ),
                 imageD );
    dim3 threads( COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y, 1 );

    hipLaunchKernelGGL( convolutionY_63_Kernel, blocks, threads, 0, 0,
                        d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
// Host-side launcher for the z (depth) convolution pass over the whole
// imageW x imageH x imageD volume.
void convolutionZ_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    // Each block outputs DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z slices in
    // z and DEPTH_BLOCKDIM_X columns in x; round the grid up so partial
    // blocks at the borders are covered; one image row per grid layer y.
    const int zPerBlock = DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z;
    dim3 blocks( imageW / DEPTH_BLOCKDIM_X + imin( 1, imageW % DEPTH_BLOCKDIM_X ),
                 imageH,
                 imageD / zPerBlock + imin( 1, imageD % zPerBlock ) );
    dim3 threads( DEPTH_BLOCKDIM_X, 1, DEPTH_BLOCKDIM_Z );

    hipLaunchKernelGGL( convolutionZ_63_Kernel, blocks, threads, 0, 0,
                        d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
// Separable 3-D convolution of 'image' (in place) with the per-axis
// filters kernelX/Y/Z (each KERNEL_LENGTH taps). convolveX/Y/Z select
// which axes are convolved; 'outofbounds' selects the border policy
// (0 zero, 1 outofboundsvalue, else clamp). devCUDA < 0 falls back to
// the CPU path (convolutionCPU). The GPU path ping-pongs between two
// device buffers; 'in' tracks which buffer holds the current result
// (1 = d_Output, 0 = d_Input).
// NOTE(review): returns -1 unconditionally — the comment "// true"
// suggests callers treat nonzero as success; confirm against callers.
extern "C" int convolve_63( float *image, float *kernelX, float *kernelY, float *kernelZ, int imageW, int imageH, int imageD, int convolveX, int convolveY, int convolveZ, int outofbounds, float outofboundsvalue, int devCUDA )
{
    if ( devCUDA < 0 )
    {
        fprintf(stderr, "Cuda device '%i' is smaller 0 indicating to compute it on the CPU, doing that now (slow, not multithreaded)\n", devCUDA );

        // Axes that are not convolved get a 1-tap identity filter so the
        // CPU routine can always run all three passes.
        int kx = KERNEL_LENGTH;
        int ky = KERNEL_LENGTH;
        int kz = KERNEL_LENGTH;

        if ( convolveX == 0 )
        {
            kx = 1;
            kernelX[ 0 ] = 1;
        }

        if ( convolveY == 0 )
        {
            ky = 1;
            kernelY[ 0 ] = 1;
        }

        if ( convolveZ == 0 )
        {
            kz = 1;
            kernelZ[ 0 ] = 1;
        }

        convolutionCPU( image, kernelX, kernelY, kernelZ, kx, ky, kz, imageW, imageH, imageD, outofbounds, outofboundsvalue );

        return -1;
    }

    //fprintf(stderr, "Cuda device: %i\n", devCUDA );
    //fprintf(stderr, "Convolving X: %i\n", convolveX );
    //fprintf(stderr, "Convolving Y: %i\n", convolveY );
    //fprintf(stderr, "Convolving Z: %i\n", convolveZ );
    //fprintf(stderr, "Image Size X: %i\n", imageW );
    //fprintf(stderr, "Image Size Y: %i\n", imageH );
    //fprintf(stderr, "Image Size Z: %i\n", imageD );

    float *d_Input, *d_Output;

    hipSetDevice( devCUDA );

    // allocate memory for CUDA
    HANDLE_ERROR( hipMalloc((void **)&d_Input, imageW * imageH * imageD * sizeof(float)) );
    HANDLE_ERROR( hipMalloc((void **)&d_Output, imageW * imageH * imageD * sizeof(float)) );

    // copy input to graphics card
    HANDLE_ERROR( hipMemcpy(d_Input, image, imageW * imageH * imageD * sizeof(float), hipMemcpyHostToDevice) );

    // FOR DEBUG ONLY! set the output to zero
    //float *tmp;
    //tmp = (float *)malloc(imageW * imageH * imageD * sizeof(float));
    //memset(tmp, 0, imageW * imageH * imageD * sizeof(float));
    //HANDLE_ERROR( hipMemcpy(d_Output, tmp, imageW * imageH * imageD * sizeof(float), hipMemcpyHostToDevice) );
    // FOR DEBUG ONLY!

    HANDLE_ERROR( hipDeviceSynchronize() );

    // Ping-pong between the two buffers: each enabled pass reads from
    // the buffer that holds the previous result and writes to the other.
    int in = 0;

    if ( convolveX != 0 )
    {
        setConvolutionKernel_63( kernelX );
        convolutionX_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
        in = 1;
    }

    if ( convolveY != 0 )
    {
        setConvolutionKernel_63( kernelY );
        if ( in == 0 )
        {
            convolutionY_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
            in = 1;
        }
        else
        {
            convolutionY_63( d_Input, d_Output, imageW, imageH, imageD, outofbounds, outofboundsvalue );
            in = 0;
        }
    }

    if ( convolveZ != 0 )
    {
        setConvolutionKernel_63( kernelZ );
        if ( in == 0 )
        {
            convolutionZ_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
            in = 1;
        }
        else
        {
            convolutionZ_63( d_Input, d_Output, imageW, imageH, imageD, outofbounds, outofboundsvalue );
            in = 0;
        }
    }

    HANDLE_ERROR( hipDeviceSynchronize() );

    // copy back from whichever buffer holds the final result
    if ( in == 1 )
        HANDLE_ERROR( hipMemcpy(image, d_Output, imageW * imageH * imageD * sizeof(float), hipMemcpyDeviceToHost) );
    else
        HANDLE_ERROR( hipMemcpy(image, d_Input, imageW * imageH * imageD * sizeof(float), hipMemcpyDeviceToHost) );

    HANDLE_ERROR( hipFree(d_Output) );
    HANDLE_ERROR( hipFree(d_Input) );

    // NOTE(review): hipDeviceReset() tears down the whole device context,
    // which affects any other live allocations/streams of this process —
    // confirm this is intended for repeated calls.
    hipDeviceReset();

    return -1; // true
}
| 58ecc1fd4cc74c5a4407e4606a230a07842e6cdc.cu | #include "separableConvolution.h"
#include "cuda.h"
#include "book.h"
#include <math.h>
#define KERNEL_RADIUS 31
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
__constant__ float c_Kernel[ KERNEL_LENGTH ];
// Upload a KERNEL_LENGTH-tap filter into the constant-memory array
// c_Kernel used by the convolution kernels in this file. Must be called
// before launching a pass whose filter differs from the previous one.
void setConvolutionKernel_63( float *h_Kernel )
{
    // Fix: check the copy (consistent with the HANDLE_ERROR usage in the
    // rest of this file) — a silent failure here would make every later
    // convolution produce garbage with no diagnostic.
    HANDLE_ERROR( cudaMemcpyToSymbol( c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float) ) );
}
// how many threads per block in x (total num threads: x*y)
#define ROWS_BLOCKDIM_X 32
// how many threads per block in y
#define ROWS_BLOCKDIM_Y 8
// how many pixels in x are convolved by each thread
#define ROWS_RESULT_STEPS 8
// these are the border pixels (loaded to support the kernel width for processing)
// the effective border width is ROWS_HALO_STEPS * ROWS_BLOCKDIM_X, which has to be
// larger or equal to the kernel radius to work
#define ROWS_HALO_STEPS 1
#define COLUMNS_BLOCKDIM_X 8
#define COLUMNS_BLOCKDIM_Y 32
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
#define DEPTH_BLOCKDIM_X 8
#define DEPTH_BLOCKDIM_Z 32
#define DEPTH_RESULT_STEPS 8
#define DEPTH_HALO_STEPS 1
// X (row) pass of the separable convolution with the KERNEL_LENGTH-tap
// filter held in constant memory (c_Kernel). Each block stages
// ROWS_RESULT_STEPS + 2*ROWS_HALO_STEPS tiles of ROWS_BLOCKDIM_X pixels
// for ROWS_BLOCKDIM_Y rows of one z-slice into shared memory, then each
// thread convolves ROWS_RESULT_STEPS pixels.
// Border policy selected by 'outofbounds':
//   0 -> zero padding, 1 -> constant 'outofboundsvalue',
//   otherwise clamp to the first/last pixel of the line.
__global__ void convolutionX_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];

    // Global coordinates of this thread's first (left-halo) pixel;
    // baseX can be negative for the leftmost block.
    const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z;

    // Offsets (relative to the shifted d_Src below) of the clamped
    // first/last pixel of this image line, used by the clamp policy.
    const int firstPixelInLine = ROWS_BLOCKDIM_X * ROWS_HALO_STEPS - threadIdx.x;
    const int lastPixelInLine = imageW - baseX - 1;

    // set the input and output arrays to the right offset (actually the output is not at the right offset, but this is corrected later)
    d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
    d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;

    // Load main data
    // Start copying after the ROWS_HALO_STEPS, only the original data that will be convolved
    #pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
    }

    // Load left halo
    // If the data fetched is outside of the image (note: baseX can be <0 for the first block), apply the selected border policy
    #pragma unroll
    for (int i = 0; i < ROWS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ firstPixelInLine ];
    }

    // Load right halo
    #pragma unroll
    for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
        else
            s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
    }

    // Compute and store results
    __syncthreads();

    // this pixel is not part of the image and does not need to be convolved;
    // note the early exit comes AFTER the barrier so that every thread of
    // the block participates in __syncthreads() above.
    if ( baseY >= imageH )
        return;

    #pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        if (imageW - baseX > i * ROWS_BLOCKDIM_X)
        {
            float sum = 0;

            #pragma unroll
            for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
            {
                sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
            }

            d_Dst[i * ROWS_BLOCKDIM_X] = sum;
        }
    }
}
// Y (column) pass of the separable convolution; same structure as the
// x-pass but the stride between neighbouring samples is one image row
// (imageW). The +1 padding of the inner shared-memory dimension
// presumably avoids shared-memory bank conflicts — standard trick,
// not asserted here.
// Border policy via 'outofbounds': 0 -> zero, 1 -> outofboundsvalue,
// otherwise clamp to the first/last pixel of the column.
__global__ void convolutionY_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];

    // Offset to the upper halo edge
    const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
    const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z;

    // Relative offsets of the clamped first/last pixel of this column
    const int firstPixelInLine = (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS - threadIdx.y) * imageW;
    const int lastPixelInLine = (imageH - baseY - 1) * imageW;

    d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
    d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;

    // Main data
    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ lastPixelInLine ];
    }

    // Upper halo
    #pragma unroll
    for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ firstPixelInLine ];
    }

    // Lower halo
    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
    {
        if ( outofbounds == 0 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
        else if ( outofbounds == 1 )
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : outofboundsvalue;
        else
            s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : d_Src[ lastPixelInLine ];
    }

    // Compute and store results
    __syncthreads();

    // this pixel is not part of the image and does not need to be convolved;
    // the early exit comes AFTER the barrier so every thread reaches it
    if ( baseX >= imageW )
        return;

    #pragma unroll
    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        if (imageH - baseY > i * COLUMNS_BLOCKDIM_Y)
        {
            float sum = 0;

            #pragma unroll
            for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
            {
                sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
            }

            d_Dst[i * COLUMNS_BLOCKDIM_Y * imageW] = sum;
        }
    }
}
/*
 * Separable convolution pass along Z for a 3D float volume.
 *
 * Grid layout: blockIdx.x tiles X (DEPTH_BLOCKDIM_X threads), blockIdx.y is
 * one Y row, blockIdx.z covers DEPTH_RESULT_STEPS tiles of DEPTH_BLOCKDIM_Z
 * pixels along Z (plus DEPTH_HALO_STEPS halo tiles on each side).
 *
 * Border policy (outofbounds): 0 = zero padding, 1 = constant value
 * `outofboundsvalue`, any other value = replicate the edge pixel of the line
 * (first/last pixel along Z).
 *
 * NOTE(review): threads whose baseX >= imageW still perform the global loads
 * into shared memory before the baseX guard below; on the last row of the
 * volume these reads may fall outside the allocation — verify against the
 * launcher's padding assumptions.
 */
__global__ void convolutionZ_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
// here it is [x][z], we leave out y as it has a size of 1
// +1 column of padding to avoid shared-memory bank conflicts
__shared__ float s_Data[DEPTH_BLOCKDIM_X][(DEPTH_RESULT_STEPS + 2 * DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * DEPTH_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y;
const int baseZ = (blockIdx.z * DEPTH_RESULT_STEPS - DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + threadIdx.z;
// offsets (relative to this thread's d_Src base) of the first/last voxel of
// the Z line — used by the edge-replicate border policy
const int firstPixelInLine = (DEPTH_BLOCKDIM_Z * DEPTH_HALO_STEPS - threadIdx.z) * imageW * imageH;
const int lastPixelInLine = (imageD - baseZ - 1) * imageW * imageH;
d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;
//Main data
#pragma unroll
for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
else
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ lastPixelInLine ];
}
//Upper halo
#pragma unroll
for (int i = 0; i < DEPTH_HALO_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
else
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (baseZ >= -i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ firstPixelInLine ];
}
//Lower halo
#pragma unroll
for (int i = DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS + DEPTH_HALO_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : outofboundsvalue;
else
s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z]= (imageD - baseZ > i * DEPTH_BLOCKDIM_Z) ? d_Src[i * DEPTH_BLOCKDIM_Z * imageW * imageH] : d_Src[ lastPixelInLine ];
}
//Compute and store results
// barrier is placed before the baseX early-out so all threads reach it
__syncthreads();
// this pixel is not part of the image and does not need to be convolved
if ( baseX >= imageW )
return;
#pragma unroll
for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
{
if (imageD - baseZ > i * DEPTH_BLOCKDIM_Z)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z + j];
}
d_Dst[i * DEPTH_BLOCKDIM_Z * imageW * imageH] = sum;
}
}
}
// Launches the row (X-direction) convolution pass over the whole volume.
// Each block processes ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X pixels of a row;
// a partially-covered tile at the edge gets one extra block.
void convolutionX_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    const int tileW = ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X;
    const int gridX = imageW / tileW + imin( 1, imageW % tileW );
    const int gridY = imageH / ROWS_BLOCKDIM_Y + imin( 1, imageH % ROWS_BLOCKDIM_Y );
    dim3 grid( gridX, gridY, imageD );
    dim3 block( ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y, 1 );
    convolutionX_63_Kernel<<<grid, block>>>( d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
// Launches the column (Y-direction) convolution pass over the whole volume.
// Each block processes COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y pixels of a
// column; a partially-covered tile at the edge gets one extra block.
void convolutionY_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    const int tileH = COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y;
    const int gridX = imageW / COLUMNS_BLOCKDIM_X + imin( 1, imageW % COLUMNS_BLOCKDIM_X );
    const int gridY = imageH / tileH + imin( 1, imageH % tileH );
    dim3 grid( gridX, gridY, imageD );
    dim3 block( COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y, 1 );
    convolutionY_63_Kernel<<<grid, block>>>( d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
// Launches the depth (Z-direction) convolution pass over the whole volume.
// Each block processes DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z voxels of a
// Z-line; a partially-covered tile at the edge gets one extra block.
void convolutionZ_63( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
    const int tileD = DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z;
    const int gridX = imageW / DEPTH_BLOCKDIM_X + imin( 1, imageW % DEPTH_BLOCKDIM_X );
    const int gridZ = imageD / tileD + imin( 1, imageD % tileD );
    dim3 grid( gridX, imageH, gridZ );
    dim3 block( DEPTH_BLOCKDIM_X, 1, DEPTH_BLOCKDIM_Z );
    convolutionZ_63_Kernel<<<grid, block>>>( d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue );
}
/*
 * C-callable separable 3D convolution driver.
 *
 * image          : in/out host buffer of imageW*imageH*imageD floats,
 *                  overwritten with the convolved result.
 * kernelX/Y/Z    : 1D kernels (KERNEL_LENGTH floats each).
 * convolveX/Y/Z  : non-zero enables convolution along that axis.
 * outofbounds    : border policy forwarded to the kernels
 *                  (0 = zero pad, 1 = constant outofboundsvalue,
 *                   else replicate the edge pixel).
 * devCUDA        : CUDA device index; a negative value selects the
 *                  single-threaded CPU fallback.
 *
 * Passes ping-pong between d_Input and d_Output; `in` tracks which buffer
 * holds the latest result (1 = d_Output). Returns -1 on every path.
 */
extern "C" int convolve_63( float *image, float *kernelX, float *kernelY, float *kernelZ, int imageW, int imageH, int imageD, int convolveX, int convolveY, int convolveZ, int outofbounds, float outofboundsvalue, int devCUDA )
{
if ( devCUDA < 0 )
{
fprintf(stderr, "Cuda device '%i' is smaller 0 indicating to compute it on the CPU, doing that now (slow, not multithreaded)\n", devCUDA );
int kx = KERNEL_LENGTH;
int ky = KERNEL_LENGTH;
int kz = KERNEL_LENGTH;
// a disabled axis becomes a length-1 identity kernel on the CPU path
if ( convolveX == 0 )
{
kx = 1;
kernelX[ 0 ] = 1;
}
if ( convolveY == 0 )
{
ky = 1;
kernelY[ 0 ] = 1;
}
if ( convolveZ == 0 )
{
kz = 1;
kernelZ[ 0 ] = 1;
}
convolutionCPU( image, kernelX, kernelY, kernelZ, kx, ky, kz, imageW, imageH, imageD, outofbounds, outofboundsvalue );
return -1;
}
//fprintf(stderr, "Cuda device: %i\n", devCUDA );
//fprintf(stderr, "Convolving X: %i\n", convolveX );
//fprintf(stderr, "Convolving Y: %i\n", convolveY );
//fprintf(stderr, "Convolving Z: %i\n", convolveZ );
//fprintf(stderr, "Image Size X: %i\n", imageW );
//fprintf(stderr, "Image Size Y: %i\n", imageH );
//fprintf(stderr, "Image Size Z: %i\n", imageD );
float *d_Input, *d_Output;
cudaSetDevice( devCUDA );
// allocate memory for CUDA
HANDLE_ERROR( cudaMalloc((void **)&d_Input, imageW * imageH * imageD * sizeof(float)) );
HANDLE_ERROR( cudaMalloc((void **)&d_Output, imageW * imageH * imageD * sizeof(float)) );
// copy input to graphics card
HANDLE_ERROR( cudaMemcpy(d_Input, image, imageW * imageH * imageD * sizeof(float), cudaMemcpyHostToDevice) );
// FOR DEBUG ONLY! set the output to zero
//float *tmp;
//tmp = (float *)malloc(imageW * imageH * imageD * sizeof(float));
//memset(tmp, 0, imageW * imageH * imageD * sizeof(float));
//HANDLE_ERROR( cudaMemcpy(d_Output, tmp, imageW * imageH * imageD * sizeof(float), cudaMemcpyHostToDevice) );
// FOR DEBUG ONLY!
HANDLE_ERROR( cudaDeviceSynchronize() );
// `in` == 1 means d_Output currently holds the most recent pass's result
int in = 0;
if ( convolveX != 0 )
{
// each pass uploads its 1D kernel to constant memory first
setConvolutionKernel_63( kernelX );
convolutionX_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
in = 1;
}
if ( convolveY != 0 )
{
setConvolutionKernel_63( kernelY );
if ( in == 0 )
{
convolutionY_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
in = 1;
}
else
{
convolutionY_63( d_Input, d_Output, imageW, imageH, imageD, outofbounds, outofboundsvalue );
in = 0;
}
}
if ( convolveZ != 0 )
{
setConvolutionKernel_63( kernelZ );
if ( in == 0 )
{
convolutionZ_63( d_Output, d_Input, imageW, imageH, imageD, outofbounds, outofboundsvalue );
in = 1;
}
else
{
convolutionZ_63( d_Input, d_Output, imageW, imageH, imageD, outofbounds, outofboundsvalue );
in = 0;
}
}
HANDLE_ERROR( cudaDeviceSynchronize() );
// copy back
if ( in == 1 )
HANDLE_ERROR( cudaMemcpy(image, d_Output, imageW * imageH * imageD * sizeof(float), cudaMemcpyDeviceToHost) );
else
HANDLE_ERROR( cudaMemcpy(image, d_Input, imageW * imageH * imageD * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaFree(d_Output) );
HANDLE_ERROR( cudaFree(d_Input) );
// NOTE(review): cudaDeviceReset() destroys the whole CUDA context, not just
// this call's allocations — verify no other in-process CUDA state is live.
cudaDeviceReset();
return -1; // true
}
|
58818dca84d12b6603567b9e4adffa5b6e7e27f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \file lbmFlowAroundCylinder.cu
* \brief Cuda version based on lbm_palabos_friendly (standard C).
* \author Adrien Python
* \date 22.01.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#include <pgm.h>
#include <timing.h>
#define RE 220.0 // Reynolds number
#define NX 420 // Numer of lattice nodes (width)
#define NY 180 // Numer of lattice nodes (height)
#define LY ((NY) - 1) // Height of the domain in lattice units
#define CX ((NX) / 4) // X coordinates of the cylinder
#define CY ((NY) / 2) // Y coordinates of the cylinder
#define R ((NY) / 9) // Cylinder radius
#define ULB 0.04 // Velocity in lattice units
#define NULB ((ULB) * (R) / (RE)) // Viscoscity in lattice units
#define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter
#define NB_BLOCKS 1
//#define NB_THREADS 100
#define SQUARE(a) ((a)*(a))
#define GPU_SQUARE(a) (__dmul_rn(a,a))
#define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0)
typedef enum { OUT_NONE, OUT_FIN, OUT_IMG } out_mode;
typedef struct {
bool obstacles[NX][NY]; // Should reside in lbm_consts but is too big for constant memory
double u[NX][NY][2];
double fin[NX][NY][9];
double fout[NX][NY][9];
} lbm_vars;
typedef struct {
size_t col[3][3];
size_t opp[9];
ssize_t v[2][9];
double t[9];
double vel[NY];
} lbm_consts;
__constant__ lbm_consts d_consts;
const ssize_t V[][9] = {
{ 1, 1, 1, 0, 0, 0,-1,-1,-1 },
{ 1, 0,-1, 1, 0,-1, 1, 0,-1 }
};
const double T[] = { 1./36, 1./9, 1./36, 1./9, 4./9, 1./9, 1./36, 1./9, 1./36 };
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
/* Aborts the process with a diagnostic when a HIP API call has failed;
 * no-op on hipSuccess. Used via the HANDLE_ERROR macro. */
inline void handleError(hipError_t code, const char *file, int line)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( hipPeekAtLastError() ); \
HANDLE_ERROR( hipDeviceSynchronize() ); \
} while(0)
/**
* Setup: cylindrical obstacle and velocity inlet with perturbation
* Creation of a mask with boolean values, defining the shape of the obstacle.
*/
/* Fills the obstacle mask: a node is an obstacle iff it lies strictly inside
 * the cylinder of radius R centred at (CX, CY). */
static void initObstacles(lbm_vars* vars)
{
    for (int ix = 0; ix < NX; ix++) {
        for (int iy = 0; iy < NY; iy++) {
            const int dx = ix - CX;
            const int dy = iy - CY;
            vars->obstacles[ix][iy] = dx * dx + dy * dy < R * R;
        }
    }
}
/**
* Initial velocity profile: almost zero, with a slight perturbation to trigger
* the instability.
*/
/* Inflow velocity profile: ULB with a tiny sinusoidal perturbation along y
 * that triggers the vortex-shedding instability. */
static void initVelocity(double* vel)
{
    for (int y = 0; y < NY; y++) {
        const double phase = y / (double)LY * 2 * M_PI;
        vel[y] = ULB * (1 + 0.0001 * sin(phase));
    }
}
/* Collects into col the (up to 3) population indices whose x-velocity
 * component equals v0, in increasing index order. */
static void initCol(size_t* col, ssize_t v0)
{
    int found = 0;
    for (int f = 0; f < 9 && found < 3; f++) {
        if (V[0][f] == v0)
            col[found++] = f;
    }
}
/* For every population f, records the index g of the population carrying the
 * exactly opposite lattice velocity (used by bounce-back). */
static void initOpp(size_t* opp)
{
    for (int f = 0; f < 9; f++) {
        int g = 0;
        while (g < 9 && !(V[0][f] == -V[0][g] && V[1][f] == -V[1][g]))
            g++;
        if (g < 9)
            opp[f] = g;
    }
}
/* Host-side equilibrium distribution for density rho and x-velocity u
 * (y-velocity implicitly 0); writes the 9 populations into feq. */
__host__ static void h_equilibrium(double* feq, double rho, double u)
{
    const double usqr = 3./2 * ( u * u );
    for (int f = 0; f < 9; f++) {
        const double cu = 3 * ( V[0][f] * u );
        const double weight = rho * T[f];
        feq[f] = weight * ( 1 + cu + 0.5 * ( cu * cu ) - usqr );
    }
}
/* Device-side equilibrium distribution for density rho and velocity
 * (u0, u1); writes the 9 populations into feq. Uses round-to-nearest
 * double intrinsics (__dmul_rn/__dadd_rn) for the velocity-square term. */
__device__ static void d_equilibrium(double* feq, double rho, double u0, double u1)
{
double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u0), GPU_SQUARE(u1) ));
for (int f = 0; f < 9; f++) {
double cu = 3 * ( d_consts.v[0][f] * u0 + d_consts.v[1][f] * u1 );
feq[f] = rho * d_consts.t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr );
}
}
/* Computes macroscopic moments of one node's 9 populations:
 * density (*rho) as their sum and velocity (*u0, *u1) as the
 * momentum divided by density. */
__device__ static void macroscopic(double* fin, double* rho, double* u0, double* u1)
{
*rho = *u0 = *u1 = 0;
for (int f = 0; f < 9; f++) {
*rho += fin[f];
*u0 += d_consts.v[0][f] * fin[f];
*u1 += d_consts.v[1][f] * fin[f];
}
*u0 /= *rho;
*u1 /= *rho;
}
/*
 * One fused LBM collision step over all NX*NY nodes (grid-stride loop):
 *   1. right wall (x == NX-1): outflow — copy the incoming populations from
 *      column NX-2,
 *   2. macroscopic density/velocity of the node,
 *   3. left wall (x == 0): imposed inflow velocity from d_consts.vel;
 *      density reconstructed from the known populations, then the unknown
 *      populations rebuilt from the equilibrium,
 *   4. bounce-back on obstacle nodes, otherwise BGK-style relaxation toward
 *      equilibrium with rate OMEGA,
 *   5. store the velocity field for visualisation.
 * Reads d_vars->fin; writes d_vars->fout and d_vars->u.
 */
__global__ void lbm_computation(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
if (x == NX-1) {
// Right wall: outflow condition.
for (int i = 0; i < 3; i++) {
int f = d_consts.col[2][i];
d_vars->fin[NX-1][y][f] = d_vars->fin[NX-2][y][f];
}
}
// Compute macroscopic variables, density and velocity
double rho, u0, u1;
macroscopic(d_vars->fin[x][y], &rho, &u0, &u1);
if (x == 0) {
// Left wall: inflow condition
u0 = d_consts.vel[y];
u1 = 0;
// Calculate the density
double s2 = 0, s3 = 0;
for (size_t i = 0; i < 3; i++) {
s2 += d_vars->fin[0][y][d_consts.col[1][i]];
s3 += d_vars->fin[0][y][d_consts.col[2][i]];
}
rho = 1./(1 - u0) * (s2 + 2*s3);
}
// Compute equilibrium
double feq[9];
d_equilibrium(feq, rho, u0, u1);
if (x == 0) {
// rebuild the unknown (rightward) populations at the inlet
for (size_t i = 0, f = d_consts.col[0][i]; i < 3; f = d_consts.col[0][++i]) {
d_vars->fin[0][y][f] = feq[f] + d_vars->fin[0][y][d_consts.opp[f]] - feq[d_consts.opp[f]];
}
}
for (size_t f = 0; f < 9; f++) {
if (d_vars->obstacles[x][y]) {
// Bounce-back condition for obstacle
d_vars->fout[x][y][f] = d_vars->fin[x][y][d_consts.opp[f]];
} else {
// Collision step
d_vars->fout[x][y][f] = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(d_vars->fin[x][y][f], - feq[f])), d_vars->fin[x][y][f]);
}
}
d_vars->u[x][y][0] = u0;
d_vars->u[x][y][1] = u1;
}
}
/* Streaming step: propagates each post-collision population fout[x][y][f]
 * to its destination node along lattice velocity v[f], with periodic
 * wrap-around in both directions. Grid-stride loop over all NX*NY nodes;
 * must run in a separate kernel launch after lbm_computation so all fout
 * values are complete. */
__global__ void lbm_streaming(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
// Streaming step
for (size_t f = 0; f < 9; f++) {
size_t x_dst = (x + NX + d_consts.v[0][f]) % NX;
size_t y_dst = (y + NY + d_consts.v[1][f]) % NY;
d_vars->fin[x_dst][y_dst][f] = d_vars->fout[x][y][f];
}
}
}
/*
 * Writes every population value var[x][y][f] to <filename>, one value per
 * line with 60 decimal digits, in x-major, then y, then f order.
 * If the file cannot be opened, reports the error and returns without
 * writing (the original passed NULL to fprintf — undefined behaviour).
 */
void output_variables(char* filename, double var[NX][NY][9])
{
    FILE* file = fopen(filename, "w");
    if (file == NULL) {
        perror(filename);
        return;
    }
    for (size_t x = 0; x < NX; x++) {
        for (size_t y = 0; y < NY; y++) {
            for (size_t f = 0; f < 9; ++f) {
                fprintf(file, "%64.60f\n", var[x][y][f]);
            }
        }
    }
    fclose(file);
}
/* Renders the velocity magnitude field as a grayscale PGM image:
 * |u| is scaled by 10, clamped to 1.0, and mapped to 0..255. */
void output_image(char* filename, double u[NX][NY][2])
{
pgm_image* pgm = pgm_create(NX, NY);
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
double vel = sqrt( SQUARE(u[x][y][0]) + SQUARE(u[x][y][1]) );
int color = 255 * min(vel * 10, 1.0);
pgm_set_pixel(pgm, x, y, color);
}
}
pgm_write(pgm, filename);
pgm_destroy(pgm);
}
/* Picks the per-block thread count for the simulation kernels:
 * NB_THREADS if that macro is defined, otherwise the device's maximum
 * x-dimension; in both cases clamped to the device limit and to the
 * total number of lattice nodes. */
int getThreads(int width, int height) {
int dev, threads;
hipDeviceProp_t prop;
HANDLE_ERROR( hipGetDevice(&dev) );
HANDLE_ERROR( hipGetDeviceProperties(&prop, dev) );
int maxThreads = min(prop.maxThreadsDim[0], prop.maxThreadsPerBlock);
#ifdef NB_THREADS
threads = NB_THREADS;
#else
threads = prop.maxThreadsDim[0];
#endif
if (threads > maxThreads)
threads = maxThreads;
return min(threads, width*height);
}
/*
 * Lattice-site updates per second: lattices * iterations / seconds, where
 * ns_time_diff is the elapsed time in nanoseconds.
 *
 * Fix: the original evaluated `lattices * iterations` in int arithmetic
 * before the float promotion; for this simulation (NX*NY = 75600 sites)
 * that overflows past ~28000 iterations. Promote to double first.
 */
float get_lups(int lattices, int iterations, long ns_time_diff)
{
    return (float)((double)lattices * iterations * 1000000000.0 / ns_time_diff);
}
/*
 * Entry point of the HIP LBM flow-around-cylinder simulation: parses options,
 * initialises constants and populations, runs the collision + streaming loop
 * on the GPU, and periodically dumps images (-p) or populations (-f).
 *
 * Fix: hipify had mangled the kernel launches into
 *   hipLaunchKernelGGL(( HANDLE_KERNEL_ERROR(kernel), dim3(...), ... ));
 * which is not valid code — the error-checking macro must wrap the launch,
 * not the kernel name. The launch geometry (NB_BLOCKS blocks of
 * getThreads(NX, NY) threads; both kernels use grid-stride loops) is
 * unchanged; the dim3 locals are renamed because the originals were
 * swapped (dimBlock held the grid size).
 */
int main(int argc, char * const argv[])
{
    // Init options to default values
    const char* out_path = ".";
    const char* out_pref = "lbm";
    out_mode out = OUT_NONE;
    ssize_t max_iter = 0;
    size_t out_interval = 0;
    bool print_lups = false;
    bool print_avg_lups = false;
    // Read arguments
    while (optind < argc) {
        switch (getopt(argc, argv, "pfi:I:o:O:lL")) {
            case 'p': { out = OUT_IMG; break; }
            case 'f': { out = OUT_FIN; break; }
            case 'i': { max_iter = strtol(optarg, NULL, 10); break; }
            case 'I': { out_interval = strtol(optarg, NULL, 10); break; }
            case 'o': { out_path = optarg; break; }
            case 'O': { out_pref = optarg; break; }
            case 'l': { print_lups = true; break; }
            case 'L': { print_avg_lups = true; break; }
            default : { goto usage; }
        }
    }
    // check that execution mode is set (output images or fin values)
    if (max_iter < 1) {
    usage:
        fprintf(stderr, "usage: %s (-p | -f) -i <iter> [-I <out_interval>] [-o <out_dir>] [-O <out_prefix>] [-l] [-L]\n", basename((char*)argv[0]));
        fprintf(stderr, "  -p : output pictures\n");
        fprintf(stderr, "  -f : output populations\n");
        fprintf(stderr, "  -i : number of iterations\n");
        fprintf(stderr, "  -I : output interval; (0 if only the last iteration output in required)\n");
        fprintf(stderr, "  -o : output file directory\n");
        fprintf(stderr, "  -O : output filename prefix\n");
        fprintf(stderr, "  -l : print lups at each output interval\n");
        fprintf(stderr, "  -L : print average lups at the end\n");
        return EXIT_FAILURE;
    }
    if (out == OUT_NONE) {
        fprintf(stderr, "No output mode specified.\n");
    }
    // Build host-side constants and upload them to constant memory.
    lbm_consts* h_consts = (lbm_consts*)malloc(sizeof(lbm_consts));
    initCol(h_consts->col[0],  1);
    initCol(h_consts->col[1],  0);
    initCol(h_consts->col[2], -1);
    initOpp(h_consts->opp);
    initVelocity(h_consts->vel);
    memcpy(h_consts->v, V, sizeof(V));
    memcpy(h_consts->t, T, sizeof(T));
    // NOTE(review): hipify normally emits HIP_SYMBOL(d_consts) here; verify
    // this compiles on the AMD backend.
    HANDLE_ERROR(hipMemcpyToSymbol(d_consts, h_consts, sizeof(lbm_consts)));
    lbm_vars *h_vars = (lbm_vars*)malloc(sizeof(lbm_vars));
    initObstacles(h_vars);
    // Initialization of the populations at equilibrium with the given velocity.
    for (int y = 0; y < NY; y++) {
        for (int x = 0; x < NX; x++) {
            h_equilibrium(h_vars->fin[x][y], 1.0, h_consts->vel[y]);
        }
    }
    lbm_vars *d_vars;
    HANDLE_ERROR(hipMalloc(&d_vars, sizeof(lbm_vars)));
    HANDLE_ERROR(hipMemcpy(d_vars, h_vars, sizeof(lbm_vars), hipMemcpyHostToDevice));
    // First launch argument is the grid dimension, second the block dimension.
    dim3 simGrid(NB_BLOCKS);
    dim3 simBlock(getThreads(NX, NY));
    long time_diff, total_time_diff = 0;
    start_time_t start_time;
    timing_start(&start_time);
    for (int iter = 1; iter <= max_iter; iter++) {
        // Collision, then streaming; each checked for launch/runtime errors.
        HANDLE_KERNEL_ERROR( hipLaunchKernelGGL(lbm_computation, simGrid, simBlock, 0, 0, d_vars) );
        HANDLE_KERNEL_ERROR( hipLaunchKernelGGL(lbm_streaming,   simGrid, simBlock, 0, 0, d_vars) );
        if ( (!out_interval && iter == max_iter) || (out_interval && iter % out_interval == 0) ) {
            total_time_diff += time_diff = timing_stop(&start_time);
            if ( print_lups ) {
                size_t iter_diff = out_interval? out_interval : (size_t)max_iter;
                printf("lups: %.2f\n", get_lups(NX*NY, iter_diff, time_diff));
                fflush(stdout);
            }
            HANDLE_ERROR(hipMemcpy(h_vars, d_vars, sizeof(lbm_vars), hipMemcpyDeviceToHost));
            char* filename;
            if ( out == OUT_IMG ) {
                if ( asprintf(&filename, "%s/%s%d.pgm", out_path, out_pref, iter) != -1 ) {
                    output_image(filename, h_vars->u);
                    free(filename);
                }
            }
            if (out == OUT_FIN) {
                if ( asprintf(&filename, "%s/%s%d.out", out_path, out_pref, iter) != -1 ) {
                    output_variables(filename, h_vars->fin);
                    free(filename);
                }
            }
            // Output time is excluded from the LUPS measurement.
            timing_start(&start_time);
        }
    }
    if ( print_avg_lups ) {
        printf("average lups: %.2f\n", get_lups(NX*NY, max_iter, total_time_diff));
    }
    free(h_consts);
    free(h_vars);
    HANDLE_ERROR(hipFree(d_vars));
    return EXIT_SUCCESS;
}
| 58818dca84d12b6603567b9e4adffa5b6e7e27f8.cu | /*!
* \file lbmFlowAroundCylinder.cu
* \brief Cuda version based on lbm_palabos_friendly (standard C).
* \author Adrien Python
* \date 22.01.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#include <pgm.h>
#include <timing.h>
#define RE 220.0 // Reynolds number
#define NX 420 // Numer of lattice nodes (width)
#define NY 180 // Numer of lattice nodes (height)
#define LY ((NY) - 1) // Height of the domain in lattice units
#define CX ((NX) / 4) // X coordinates of the cylinder
#define CY ((NY) / 2) // Y coordinates of the cylinder
#define R ((NY) / 9) // Cylinder radius
#define ULB 0.04 // Velocity in lattice units
#define NULB ((ULB) * (R) / (RE)) // Viscoscity in lattice units
#define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter
#define NB_BLOCKS 1
//#define NB_THREADS 100
#define SQUARE(a) ((a)*(a))
#define GPU_SQUARE(a) (__dmul_rn(a,a))
#define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0)
typedef enum { OUT_NONE, OUT_FIN, OUT_IMG } out_mode;
typedef struct {
bool obstacles[NX][NY]; // Should reside in lbm_consts but is too big for constant memory
double u[NX][NY][2];
double fin[NX][NY][9];
double fout[NX][NY][9];
} lbm_vars;
typedef struct {
size_t col[3][3];
size_t opp[9];
ssize_t v[2][9];
double t[9];
double vel[NY];
} lbm_consts;
__constant__ lbm_consts d_consts;
const ssize_t V[][9] = {
{ 1, 1, 1, 0, 0, 0,-1,-1,-1 },
{ 1, 0,-1, 1, 0,-1, 1, 0,-1 }
};
const double T[] = { 1./36, 1./9, 1./36, 1./9, 4./9, 1./9, 1./36, 1./9, 1./36 };
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
/* Aborts the process with a diagnostic when a CUDA API call has failed;
 * no-op on cudaSuccess. Used via the HANDLE_ERROR macro. */
inline void handleError(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( cudaPeekAtLastError() ); \
HANDLE_ERROR( cudaDeviceSynchronize() ); \
} while(0)
/**
* Setup: cylindrical obstacle and velocity inlet with perturbation
* Creation of a mask with boolean values, defining the shape of the obstacle.
*/
/* Fills the obstacle mask: a node is an obstacle iff it lies strictly inside
 * the cylinder of radius R centred at (CX, CY). */
static void initObstacles(lbm_vars* vars)
{
    for (int ix = 0; ix < NX; ix++) {
        for (int iy = 0; iy < NY; iy++) {
            const int dx = ix - CX;
            const int dy = iy - CY;
            vars->obstacles[ix][iy] = dx * dx + dy * dy < R * R;
        }
    }
}
/**
* Initial velocity profile: almost zero, with a slight perturbation to trigger
* the instability.
*/
/* Inflow velocity profile: ULB with a tiny sinusoidal perturbation along y
 * that triggers the vortex-shedding instability. */
static void initVelocity(double* vel)
{
    for (int y = 0; y < NY; y++) {
        const double phase = y / (double)LY * 2 * M_PI;
        vel[y] = ULB * (1 + 0.0001 * sin(phase));
    }
}
/* Collects into col the (up to 3) population indices whose x-velocity
 * component equals v0, in increasing index order. */
static void initCol(size_t* col, ssize_t v0)
{
    int found = 0;
    for (int f = 0; f < 9 && found < 3; f++) {
        if (V[0][f] == v0)
            col[found++] = f;
    }
}
/* For every population f, records the index g of the population carrying the
 * exactly opposite lattice velocity (used by bounce-back). */
static void initOpp(size_t* opp)
{
    for (int f = 0; f < 9; f++) {
        int g = 0;
        while (g < 9 && !(V[0][f] == -V[0][g] && V[1][f] == -V[1][g]))
            g++;
        if (g < 9)
            opp[f] = g;
    }
}
/* Host-side equilibrium distribution for density rho and x-velocity u
 * (y-velocity implicitly 0); writes the 9 populations into feq. */
__host__ static void h_equilibrium(double* feq, double rho, double u)
{
    const double usqr = 3./2 * ( u * u );
    for (int f = 0; f < 9; f++) {
        const double cu = 3 * ( V[0][f] * u );
        const double weight = rho * T[f];
        feq[f] = weight * ( 1 + cu + 0.5 * ( cu * cu ) - usqr );
    }
}
/* Device-side equilibrium distribution for density rho and velocity
 * (u0, u1); writes the 9 populations into feq. Uses round-to-nearest
 * double intrinsics (__dmul_rn/__dadd_rn) for the velocity-square term. */
__device__ static void d_equilibrium(double* feq, double rho, double u0, double u1)
{
double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u0), GPU_SQUARE(u1) ));
for (int f = 0; f < 9; f++) {
double cu = 3 * ( d_consts.v[0][f] * u0 + d_consts.v[1][f] * u1 );
feq[f] = rho * d_consts.t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr );
}
}
/* Computes macroscopic moments of one node's 9 populations:
 * density (*rho) as their sum and velocity (*u0, *u1) as the
 * momentum divided by density. */
__device__ static void macroscopic(double* fin, double* rho, double* u0, double* u1)
{
*rho = *u0 = *u1 = 0;
for (int f = 0; f < 9; f++) {
*rho += fin[f];
*u0 += d_consts.v[0][f] * fin[f];
*u1 += d_consts.v[1][f] * fin[f];
}
*u0 /= *rho;
*u1 /= *rho;
}
/*
 * One fused LBM collision step over all NX*NY nodes (grid-stride loop):
 *   1. right wall (x == NX-1): outflow — copy the incoming populations from
 *      column NX-2,
 *   2. macroscopic density/velocity of the node,
 *   3. left wall (x == 0): imposed inflow velocity from d_consts.vel;
 *      density reconstructed from the known populations, then the unknown
 *      populations rebuilt from the equilibrium,
 *   4. bounce-back on obstacle nodes, otherwise BGK-style relaxation toward
 *      equilibrium with rate OMEGA,
 *   5. store the velocity field for visualisation.
 * Reads d_vars->fin; writes d_vars->fout and d_vars->u.
 */
__global__ void lbm_computation(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
if (x == NX-1) {
// Right wall: outflow condition.
for (int i = 0; i < 3; i++) {
int f = d_consts.col[2][i];
d_vars->fin[NX-1][y][f] = d_vars->fin[NX-2][y][f];
}
}
// Compute macroscopic variables, density and velocity
double rho, u0, u1;
macroscopic(d_vars->fin[x][y], &rho, &u0, &u1);
if (x == 0) {
// Left wall: inflow condition
u0 = d_consts.vel[y];
u1 = 0;
// Calculate the density
double s2 = 0, s3 = 0;
for (size_t i = 0; i < 3; i++) {
s2 += d_vars->fin[0][y][d_consts.col[1][i]];
s3 += d_vars->fin[0][y][d_consts.col[2][i]];
}
rho = 1./(1 - u0) * (s2 + 2*s3);
}
// Compute equilibrium
double feq[9];
d_equilibrium(feq, rho, u0, u1);
if (x == 0) {
// rebuild the unknown (rightward) populations at the inlet
for (size_t i = 0, f = d_consts.col[0][i]; i < 3; f = d_consts.col[0][++i]) {
d_vars->fin[0][y][f] = feq[f] + d_vars->fin[0][y][d_consts.opp[f]] - feq[d_consts.opp[f]];
}
}
for (size_t f = 0; f < 9; f++) {
if (d_vars->obstacles[x][y]) {
// Bounce-back condition for obstacle
d_vars->fout[x][y][f] = d_vars->fin[x][y][d_consts.opp[f]];
} else {
// Collision step
d_vars->fout[x][y][f] = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(d_vars->fin[x][y][f], - feq[f])), d_vars->fin[x][y][f]);
}
}
d_vars->u[x][y][0] = u0;
d_vars->u[x][y][1] = u1;
}
}
/* Streaming step: propagates each post-collision population fout[x][y][f]
 * to its destination node along lattice velocity v[f], with periodic
 * wrap-around in both directions. Grid-stride loop over all NX*NY nodes;
 * must run in a separate kernel launch after lbm_computation so all fout
 * values are complete. */
__global__ void lbm_streaming(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
// Streaming step
for (size_t f = 0; f < 9; f++) {
size_t x_dst = (x + NX + d_consts.v[0][f]) % NX;
size_t y_dst = (y + NY + d_consts.v[1][f]) % NY;
d_vars->fin[x_dst][y_dst][f] = d_vars->fout[x][y][f];
}
}
}
/*
 * Writes every population value var[x][y][f] to <filename>, one value per
 * line with 60 decimal digits, in x-major, then y, then f order.
 * If the file cannot be opened, reports the error and returns without
 * writing (the original passed NULL to fprintf — undefined behaviour).
 */
void output_variables(char* filename, double var[NX][NY][9])
{
    FILE* file = fopen(filename, "w");
    if (file == NULL) {
        perror(filename);
        return;
    }
    for (size_t x = 0; x < NX; x++) {
        for (size_t y = 0; y < NY; y++) {
            for (size_t f = 0; f < 9; ++f) {
                fprintf(file, "%64.60f\n", var[x][y][f]);
            }
        }
    }
    fclose(file);
}
/* Renders the velocity magnitude field as a grayscale PGM image:
 * |u| is scaled by 10, clamped to 1.0, and mapped to 0..255. */
void output_image(char* filename, double u[NX][NY][2])
{
pgm_image* pgm = pgm_create(NX, NY);
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
double vel = sqrt( SQUARE(u[x][y][0]) + SQUARE(u[x][y][1]) );
int color = 255 * min(vel * 10, 1.0);
pgm_set_pixel(pgm, x, y, color);
}
}
pgm_write(pgm, filename);
pgm_destroy(pgm);
}
/* Picks the per-block thread count for the simulation kernels:
 * NB_THREADS if that macro is defined, otherwise the device's maximum
 * x-dimension; in both cases clamped to the device limit and to the
 * total number of lattice nodes. */
int getThreads(int width, int height) {
int dev, threads;
cudaDeviceProp prop;
HANDLE_ERROR( cudaGetDevice(&dev) );
HANDLE_ERROR( cudaGetDeviceProperties(&prop, dev) );
int maxThreads = min(prop.maxThreadsDim[0], prop.maxThreadsPerBlock);
#ifdef NB_THREADS
threads = NB_THREADS;
#else
threads = prop.maxThreadsDim[0];
#endif
if (threads > maxThreads)
threads = maxThreads;
return min(threads, width*height);
}
/*
 * Lattice-site updates per second: lattices * iterations / seconds, where
 * ns_time_diff is the elapsed time in nanoseconds.
 *
 * Fix: the original evaluated `lattices * iterations` in int arithmetic
 * before the float promotion; for this simulation (NX*NY = 75600 sites)
 * that overflows past ~28000 iterations. Promote to double first.
 */
float get_lups(int lattices, int iterations, long ns_time_diff)
{
    return (float)((double)lattices * iterations * 1000000000.0 / ns_time_diff);
}
/*
 * Entry point of the CUDA LBM flow-around-cylinder simulation: parses
 * options, initialises constants and populations, runs the collision +
 * streaming loop on the GPU, and periodically dumps images (-p) or
 * population values (-f).
 *
 * NOTE(review): dimBlock/dimGrid names are swapped — dimBlock (NB_BLOCKS)
 * is passed as the grid dimension and dimGrid (thread count) as the block
 * dimension. The kernels use grid-stride loops, so the result is still
 * correct.
 */
int main(int argc, char * const argv[])
{
// Init options to default values
const char* out_path = ".";
const char* out_pref = "lbm";
out_mode out = OUT_NONE;
ssize_t max_iter = 0;
size_t out_interval = 0;
bool print_lups = false;
bool print_avg_lups = false;
// Read arguments
while (optind < argc) {
switch (getopt(argc, argv, "pfi:I:o:O:lL")) {
case 'p': { out = OUT_IMG; break; }
case 'f': { out = OUT_FIN; break; }
case 'i': { max_iter = strtol(optarg, NULL, 10); break; }
case 'I': { out_interval = strtol(optarg, NULL, 10); break; }
case 'o': { out_path = optarg; break; }
case 'O': { out_pref = optarg; break; }
case 'l': { print_lups = true; break; }
case 'L': { print_avg_lups = true; break; }
default : { goto usage; }
}
}
// check that execution mode is set (output images or fin values)
if (max_iter < 1) {
usage:
fprintf(stderr, "usage: %s (-p | -f) -i <iter> [-I <out_interval>] [-o <out_dir>] [-O <out_prefix>] [-l] [-L]\n", basename((char*)argv[0]));
fprintf(stderr, "  -p : output pictures\n");
fprintf(stderr, "  -f : output populations\n");
fprintf(stderr, "  -i : number of iterations\n");
fprintf(stderr, "  -I : output interval; (0 if only the last iteration output in required)\n");
fprintf(stderr, "  -o : output file directory\n");
fprintf(stderr, "  -O : output filename prefix\n");
fprintf(stderr, "  -l : print lups at each output interval\n");
fprintf(stderr, "  -L : print average lups at the end\n");
return EXIT_FAILURE;
}
if (out == OUT_NONE) {
fprintf(stderr, "No output mode specified.\n");
}
// build host-side constants and upload them to constant memory
lbm_consts* h_consts = (lbm_consts*)malloc(sizeof(lbm_consts));
initCol(h_consts->col[0],  1);
initCol(h_consts->col[1],  0);
initCol(h_consts->col[2], -1);
initOpp(h_consts->opp);
initVelocity(h_consts->vel);
memcpy(h_consts->v, V, sizeof(V));
memcpy(h_consts->t, T, sizeof(T));
HANDLE_ERROR(cudaMemcpyToSymbol(d_consts, h_consts, sizeof(lbm_consts)));
lbm_vars *h_vars = (lbm_vars*)malloc(sizeof(lbm_vars));
initObstacles(h_vars);
// Initialization of the populations at equilibrium with the given velocity.
for (int y = 0; y < NY; y++) {
for (int x = 0; x < NX; x++) {
h_equilibrium(h_vars->fin[x][y], 1.0, h_consts->vel[y]);
}
}
lbm_vars *d_vars;
HANDLE_ERROR(cudaMalloc(&d_vars, sizeof(lbm_vars)));
HANDLE_ERROR(cudaMemcpy(d_vars, h_vars, sizeof(lbm_vars), cudaMemcpyHostToDevice));
dim3 dimBlock(NB_BLOCKS);
dim3 dimGrid(getThreads(NX, NY));
long time_diff, total_time_diff = 0;
start_time_t start_time;
timing_start(&start_time);
for (int iter = 1; iter <= max_iter; iter++) {
// collision then streaming; HANDLE_KERNEL_ERROR checks launch + runtime
HANDLE_KERNEL_ERROR(lbm_computation<<<dimBlock, dimGrid>>>(d_vars));
HANDLE_KERNEL_ERROR(lbm_streaming  <<<dimBlock, dimGrid>>>(d_vars));
if ( (!out_interval && iter == max_iter) || (out_interval && iter % out_interval == 0) ) {
total_time_diff += time_diff = timing_stop(&start_time);
if ( print_lups ) {
size_t iter_diff = out_interval? out_interval : (size_t)max_iter;
printf("lups: %.2f\n", get_lups(NX*NY, iter_diff, time_diff));
fflush(stdout);
}
HANDLE_ERROR(cudaMemcpy(h_vars, d_vars, sizeof(lbm_vars), cudaMemcpyDeviceToHost));
char* filename;
if ( out == OUT_IMG ) {
if ( asprintf(&filename, "%s/%s%d.pgm", out_path, out_pref, iter) != -1 ) {
output_image(filename, h_vars->u);
free(filename);
}
}
if (out == OUT_FIN) {
if ( asprintf(&filename, "%s/%s%d.out", out_path, out_pref, iter) != -1 ) {
output_variables(filename, h_vars->fin);
free(filename);
}
}
// output time is excluded from the LUPS measurement
timing_start(&start_time);
}
}
if ( print_avg_lups ) {
printf("average lups: %.2f\n", get_lups(NX*NY, max_iter, total_time_diff));
}
free(h_consts);
free(h_vars);
HANDLE_ERROR(cudaFree(d_vars));
return EXIT_SUCCESS;
}
|
c295932b6b64b57327ca27d522ba25731dba74c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <memory.h>
#include <cutil.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_vector_types.h>
typedef unsigned int uint;
texture<float, 1, hipReadModeElementType> a_tex;
texture<float, 1, hipReadModeElementType> b_tex;
/*
 * Experiment kernel exercising texture fetches and memory fences: writes 10
 * into a[index], fences, rewrites a[index] from the (possibly stale) texture
 * view of a, issues several more texture fetches and fences, then copies
 * a[index+1] into b[index].
 *
 * NOTE(review): with the launcher in this file (<<<1,101>>> over 100-float
 * buffers), index reaches 100, so a[100] is written and a[101] is read out
 * of bounds — the buffers would need >= 102 floats to be safe.
 */
__global__ void testKernel(float* a, float* b)
{
uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if(index > 100) index-=100;
a[index] = 10;
__threadfence();
a[index] = tex1Dfetch(a_tex,index) + 1;
volatile float x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
__threadfence();
__threadfence_block();
// __threadfence_system();
__syncthreads();
//volatile float bv = a[index+1];
b[index] = a[index+1];
//b[index] = tex1Dfetch(a_tex,index+1);
}
/*
 * Host driver for the fence/texture experiment: allocates two zeroed
 * 100-float device buffers, binds them to 1D textures, runs the kernel once,
 * and copies both buffers back to pinned host memory.
 *
 * Fix: the original leaked all four allocations (device buffers and pinned
 * host buffers); they are now released before returning.
 *
 * FIXME(review): the kernel is launched with 101 threads and accesses up to
 * a[index+1] with index <= 100, i.e. past the 100-float buffers. Left
 * unchanged here (it may be a deliberate experiment), but the buffers would
 * need at least 102 floats to be safe.
 */
void testKernel()
{
    float* da;
    float* db;
    hipMalloc((void**)&da, 100*sizeof(float));
    hipMalloc((void**)&db, 100*sizeof(float));
    hipMemset(da,0,100*sizeof(float));
    hipMemset(db,0,100*sizeof(float));
    float* ha;
    float* hb;
    hipHostMalloc((void**)&ha, 100*sizeof(float));
    hipHostMalloc((void**)&hb, 100*sizeof(float));
    memset(ha,0,100*sizeof(float));
    memset(hb,0,100*sizeof(float));
    hipBindTexture(0, a_tex, da, 100*sizeof(float));
    hipBindTexture(0, b_tex, db, 100*sizeof(float));
    hipLaunchKernelGGL(( testKernel), dim3(1),dim3(101), 0, 0, da,db);
    hipUnbindTexture(a_tex);
    hipUnbindTexture(b_tex);
    hipMemcpy(ha,da,100*sizeof(float),hipMemcpyDeviceToHost);
    hipMemcpy(hb,db,100*sizeof(float),hipMemcpyDeviceToHost);
    // release device and pinned host allocations (previously leaked)
    hipHostFree(ha);
    hipHostFree(hb);
    hipFree(da);
    hipFree(db);
    return;
}
| c295932b6b64b57327ca27d522ba25731dba74c5.cu |
#include <stdio.h>
#include <memory.h>
#include <cutil.h>
#include <cuda_runtime_api.h>
#include <vector_types.h>
typedef unsigned int uint;
texture<float, 1, cudaReadModeElementType> a_tex;
texture<float, 1, cudaReadModeElementType> b_tex;
/*
 * Experiment kernel exercising texture fetches and memory fences: writes 10
 * into a[index], fences, rewrites a[index] from the (possibly stale) texture
 * view of a, issues several more texture fetches and fences, then copies
 * a[index+1] into b[index].
 *
 * NOTE(review): with the launcher in this file (<<<1,101>>> over 100-float
 * buffers), index reaches 100, so a[100] is written and a[101] is read out
 * of bounds — the buffers would need >= 102 floats to be safe.
 */
__global__ void testKernel(float* a, float* b)
{
uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if(index > 100) index-=100;
a[index] = 10;
__threadfence();
a[index] = tex1Dfetch(a_tex,index) + 1;
volatile float x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
x = tex1Dfetch(a_tex,index);
__threadfence();
__threadfence_block();
// __threadfence_system();
__syncthreads();
//volatile float bv = a[index+1];
b[index] = a[index+1];
//b[index] = tex1Dfetch(a_tex,index+1);
}
void testKernel()
{
float* da;
float* db;
cudaMalloc((void**)&da, 100*sizeof(float));
cudaMalloc((void**)&db, 100*sizeof(float));
cudaMemset(da,0,100*sizeof(float));
cudaMemset(db,0,100*sizeof(float));
float* ha;
float* hb;
cudaMallocHost((void**)&ha, 100*sizeof(float));
cudaMallocHost((void**)&hb, 100*sizeof(float));
memset(ha,0,100*sizeof(float));
memset(hb,0,100*sizeof(float));
cudaBindTexture(0, a_tex, da, 100*sizeof(float));
cudaBindTexture(0, b_tex, db, 100*sizeof(float));
testKernel<<<1,101>>>(da,db);
cudaUnbindTexture(a_tex);
cudaUnbindTexture(b_tex);
cudaMemcpy(ha,da,100*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(hb,db,100*sizeof(float),cudaMemcpyDeviceToHost);
return;
}
|
7e8eb3bfa0cbb413826911dc6a29e0cfdde15719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file benchmark_lc_select_if_mem_bf.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. in. Marek
* Nacz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#define BLOCK_TILE_LOAD_V4 1
#include <helper_cuda.h>
#ifdef RD_PROFILE
# include <hip/hip_runtime_api.h>
#endif
#include <iostream>
#include <sstream>
#include <fstream>
#include <iomanip>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <cmath>
#include <type_traits>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "rd/utils/rd_params.hpp"
#include "rd/gpu/block/block_select_if.cuh"
#include "rd/gpu/util/dev_samples_set.cuh"
#include "rd/gpu/util/dev_utilities.cuh"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "cub/util_type.cuh"
//------------------------------------------------------------
// GLOBAL CONSTANTS / VARIABLES
//------------------------------------------------------------
static const std::string LOG_FILE_NAME_SUFFIX = "_select_if_mbf-timings.txt";
static std::ofstream * g_logFile = nullptr;
static std::string g_devName = "";
static int g_devSMCount = 0;
static int g_devId = 0;
static bool g_logPerfResults = false;
static bool g_startBenchmark = false;
#if defined(RD_PROFILE) || defined(RD_DEBUG)
static const int g_iterations = 1;
#else
static const int g_iterations = 100;
#endif
//------------------------------------------------------------
// Select Op
//------------------------------------------------------------
template <int DIM, typename T>
struct LessThan
{
T val_;
__host__ __device__ __forceinline__ LessThan(T v)
:
val_(v)
{}
__host__ __device__ __forceinline__ bool operator()(T const * point) const
{
for (int d = 0; d < DIM; ++d)
{
if (point[d] >= val_)
return false;
}
return true;
}
};
//------------------------------------------------------------
// KERNEL
//------------------------------------------------------------
/**
* @tparam STORE_TWO_PHASE Whether or not to perform two phase selected items store
* with items compatcion in shmem. Oherwise uses warp-wide store.
*/
template <
typename BlockSelectIfPolicyT,
int DIM,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename OffsetT,
typename SampleT,
typename SelectOpT,
bool STORE_TWO_PHASE>
__launch_bounds__ (int(BlockSelectIfPolicyT::BLOCK_THREADS))
static __global__ void selectIfKernel(
SampleT const * d_in,
OffsetT numPoints,
SampleT ** d_selectedItemsPtrs,
OffsetT * d_selectedItemsCnt,
SelectOpT selectOp,
OffsetT inStride,
OffsetT outStride)
{
typedef rd::gpu::BlockSelectIf<
BlockSelectIfPolicyT,
DIM,
INPUT_MEM_LAYOUT,
SelectOpT,
SampleT,
OffsetT,
STORE_TWO_PHASE>
BlockSelectIfT;
if (numPoints == 0)
{
return;
}
__shared__ typename BlockSelectIfT::TempStorage tempStorage;
OffsetT selectedPointsCnt = BlockSelectIfT(tempStorage, d_in, d_selectedItemsPtrs[blockIdx.x],
selectOp).scanRange(0, numPoints, inStride, outStride);
if (threadIdx.x == 0)
{
d_selectedItemsCnt[blockIdx.x] = selectedPointsCnt;
}
}
//------------------------------------------------------------
// KERNEL INVOCATION
//------------------------------------------------------------
struct KernelConfig
{
int blockThreads;
int itemsPerThread;
};
template <
typename OffsetT,
typename SampleT,
typename SelectOpT,
typename PartitionKernelPtrT>
static hipError_t invoke(
SampleT const * d_in,
OffsetT numPoints,
SampleT ** d_selectedItemsPtrs,
OffsetT * d_selectedItemsCnt,
OffsetT inStride,
OffsetT outStride,
SelectOpT selectOp,
hipStream_t stream,
bool debugSynchronous,
PartitionKernelPtrT partitionKernelPtr,
KernelConfig kernelConfig)
{
hipError_t error = hipSuccess;
do
{
// get SM occupancy
int smOccupancy;
if(CubDebug(cub::MaxSmOccupancy(
smOccupancy,
partitionKernelPtr,
kernelConfig.blockThreads)
)) break;
dim3 partitionGridSize(1);
partitionGridSize.x = smOccupancy * g_devSMCount;
if (debugSynchronous)
{
printf("Invoking selectIfKernel<<<%d, %d, 0, %p>>> numPoints: %d, "
"pointsPerThread: %d\n",
partitionGridSize.x, kernelConfig.blockThreads, stream, numPoints,
kernelConfig.itemsPerThread);
}
hipLaunchKernelGGL(( partitionKernelPtr), dim3(partitionGridSize), dim3(kernelConfig.blockThreads), 0, stream,
d_in,
numPoints,
d_selectedItemsPtrs,
d_selectedItemsCnt,
selectOp,
inStride,
outStride);
// Check for failure to launch
if (CubDebug(error = hipPeekAtLastError())) break;
// Sync the stream if specified to flush runtime errors
if (debugSynchronous && (CubDebug(error = cub::SyncStream(stream)))) break;
} while (0);
return error;
}
//------------------------------------------------------------
// KERNEL DISPATCH
//------------------------------------------------------------
template <
int BLOCK_THREADS,
int POINTS_PER_THREAD,
cub::CacheLoadModifier LOAD_MODIFIER,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
int DIM,
bool STORE_TWO_PHASE,
typename SelectOpT,
typename OffsetT,
typename T>
static void dispatchKernel(
T const * d_in,
OffsetT numPoints,
T ** d_selectedItemsPtrs,
OffsetT * d_selectedItemsCnt,
SelectOpT selectOp,
OffsetT inStride,
OffsetT outStride,
int iterations,
bool debugSynchronous = false)
{
typedef rd::gpu::BlockSelectIfPolicy<
BLOCK_THREADS,
POINTS_PER_THREAD,
LOAD_MODIFIER,
IO_BACKEND>
BlockSelectIfPolicyT;
KernelConfig partitionConfig;
partitionConfig.blockThreads = BLOCK_THREADS;
partitionConfig.itemsPerThread = POINTS_PER_THREAD;
auto partitionKernelPtr = selectIfKernel<BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT,
OffsetT, T, SelectOpT, STORE_TWO_PHASE>;
// If we use two-phase store algorithm, which compact's selections in smem, we prefer
// larger smem to L1 cache size.
if (STORE_TWO_PHASE)
{
// set smem/L1 mem configuration
// * - ::hipFuncCachePreferNone: no preference for shared memory or L1 (default)
// * - ::hipFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
// * - ::hipFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
// * - ::hipFuncCachePreferEqual: prefer equal size L1 cache and shared memory
hipFuncSetCacheConfig(partitionKernelPtr, hipFuncCachePreferShared);
if (sizeof(T) == 8)
{
// * - ::hipSharedMemBankSizeDefault: use the device's shared memory
// configuration when launching this function.
// * - ::hipSharedMemBankSizeFourByte: set shared memory bank width to be
// four bytes natively when launching this function.
// * - ::hipSharedMemBankSizeEightByte: set shared memory bank width to be
// eight bytes natively when launching this function.
hipFuncSetSharedMemConfig(partitionKernelPtr, hipSharedMemBankSizeEightByte);
}
}
else
{
hipFuncSetCacheConfig(partitionKernelPtr, hipFuncCachePreferL1);
}
for (int i = 0; i < iterations; ++i)
{
CubDebugExit(invoke(
d_in,
numPoints,
d_selectedItemsPtrs,
d_selectedItemsCnt,
inStride,
outStride,
selectOp,
0,
debugSynchronous,
partitionKernelPtr,
partitionConfig));
}
}
//------------------------------------------------------------
// Test and benchmark specified kernel configuration
//------------------------------------------------------------
template <
int BLOCK_THREADS,
int POINTS_PER_THREAD,
int DIM,
cub::CacheLoadModifier LOAD_MODIFIER,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
bool STORE_TWO_PHASE,
typename T>
struct RunSelectIf
{
typedef LessThan<DIM, T> SelectOpT;
typedef rd::gpu::BlockSelectIfPolicy<
BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND>
BlockSelectIfPolicyT;
typedef rd::gpu::BlockSelectIf<
BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT, SelectOpT, T, int, STORE_TWO_PHASE>
BlockSelectIfT;
typedef typename BlockSelectIfT::TempStorage KernelTempStorageT;
typedef typename cub::If<sizeof(KernelTempStorageT) <= 0xc000,
std::true_type, std::false_type>::Type EnableKernelT;
static void impl(
int numPoints,
float compare,
T const * d_in,
int inStride,
int outStride,
T selectRatio,
int selectedPointsCnt)
{
doImpl<EnableKernelT>(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
}
template <
typename EnableT,
typename std::enable_if<
std::is_same<EnableT, std::true_type>::value, int>::type * = nullptr>
static void doImpl(
int numPoints,
float compare,
T const * d_in,
int inStride,
int outStride,
T selectRatio,
int selectedPointsCnt)
{
SelectOpT selectOp(compare);
auto kernelPtr = selectIfKernel<BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT, int, T,
SelectOpT, STORE_TWO_PHASE>;
// get SM occupancy
int smOccupancy;
checkCudaErrors(cub::MaxSmOccupancy(smOccupancy, kernelPtr, BLOCK_THREADS));
int blockCount = 1;
blockCount = smOccupancy * g_devSMCount;
// Allocate device arrays
T ** d_selectedPointsPtrs = nullptr;
T ** h_dSelectedPointsPtrs = new T*[blockCount];
int * d_selectedPointsCnt = nullptr;
checkCudaErrors(hipMalloc(&d_selectedPointsPtrs, blockCount * sizeof(T*)));
checkCudaErrors(hipMalloc(&d_selectedPointsCnt, blockCount * sizeof(int)));
hipStream_t auxStream;
checkCudaErrors(hipStreamCreateWithFlags(&auxStream, hipStreamNonBlocking));
checkCudaErrors(hipMemsetAsync(d_selectedPointsCnt, 0, blockCount * sizeof(int),
auxStream));
for (int k = 0; k < blockCount; ++k)
{
checkCudaErrors(hipMalloc(h_dSelectedPointsPtrs + k,
selectedPointsCnt * DIM * sizeof(T)));
checkCudaErrors(hipMemsetAsync(h_dSelectedPointsPtrs[k], 0,
selectedPointsCnt * DIM * sizeof(T), auxStream));
}
// Initialize device input
checkCudaErrors(hipMemcpy(d_selectedPointsPtrs, h_dSelectedPointsPtrs,
blockCount * sizeof(T*), hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
// Run warm-up/correctness iteration
dispatchKernel<BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT,
DIM, STORE_TWO_PHASE>(
d_in, numPoints, d_selectedPointsPtrs, d_selectedPointsCnt, selectOp, inStride,
outStride, 1);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Measure performance
GpuTimer timer;
float elapsedMillis;
#ifdef RD_PROFILE
hipProfilerStart();
#endif
timer.Start();
dispatchKernel<BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT,
DIM, STORE_TWO_PHASE>(
d_in, numPoints, d_selectedPointsPtrs, d_selectedPointsCnt, selectOp, inStride,
outStride, g_iterations);
timer.Stop();
elapsedMillis = timer.ElapsedMillis();
#ifdef RD_PROFILE
hipProfilerStop();
#endif
float avgMillis = elapsedMillis / g_iterations;
int selItemsCnt = selectedPointsCnt * DIM * blockCount;
// every block scans entire data set
unsigned long long int numBytes = sizeof(T) * blockCount * numPoints * DIM +
// storing selectedItems counters
blockCount * sizeof(int) +
// storing selected items
selItemsCnt * sizeof(T);
// conversion to GB/s
float gigaBandwidth = float(numBytes) / avgMillis / 1000.0f / 1000.0f;
KernelResourceUsage resUsage(kernelPtr, BLOCK_THREADS);
if (g_logPerfResults)
{
logValues(*g_logFile, DIM, BLOCK_THREADS, POINTS_PER_THREAD,
std::string(rd::LoadModifierNameTraits<LOAD_MODIFIER>::name),
std::string(rd::BlockTileIONameTraits<IO_BACKEND>::name),
std::string(rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name),
(STORE_TWO_PHASE) ? "true" : "false",
selectRatio, avgMillis, gigaBandwidth,
resUsage.prettyPrint());
*g_logFile << std::endl;
}
logValues(std::cout, DIM, BLOCK_THREADS, POINTS_PER_THREAD,
std::string(rd::LoadModifierNameTraits<LOAD_MODIFIER>::name),
std::string(rd::BlockTileIONameTraits<IO_BACKEND>::name),
std::string(rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name),
(STORE_TWO_PHASE) ? "true" : "false",
selectRatio, avgMillis, gigaBandwidth,
resUsage.prettyPrint());
std::cout << std::endl;
// cleanup
checkCudaErrors(hipStreamDestroy(auxStream));
for (int k = 0; k < blockCount; ++k)
{
if (h_dSelectedPointsPtrs[k]) checkCudaErrors(hipFree(h_dSelectedPointsPtrs[k]));
}
if (d_selectedPointsPtrs) checkCudaErrors(hipFree(d_selectedPointsPtrs));
if (d_selectedPointsCnt) checkCudaErrors(hipFree(d_selectedPointsCnt));
if (h_dSelectedPointsPtrs) delete[] h_dSelectedPointsPtrs;
}
template <
typename EnableT,
typename std::enable_if<
std::is_same<EnableT, std::false_type>::value, int>::type * = nullptr>
static void doImpl(
int ,
float ,
T const * ,
int ,
int ,
T ,
int )
{
}
};
//------------------------------------------------------------
// Test kernel block-threads / items-per-thread configurations
//------------------------------------------------------------
/*
* Test different kernel configurations (block size, points per thread)
*/
template <
int DIM,
cub::CacheLoadModifier LOAD_MODIFIER,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
bool STORE_TWO_PHASE,
typename T>
void iterateKernelConf(
int numPoints,
float compare,
T const * d_in,
int inStride,
int outStride,
T selectRatio,
int selectedPointsCnt)
{
#ifdef QUICK_TEST
RunSelectIf<RD_BLOCK_SIZE, 4, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
#else
RunSelectIf<RD_BLOCK_SIZE, 1, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 2, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 3, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 4, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 5, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 6, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 7, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 8, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE, 9, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE,10, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE,11, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
RunSelectIf<RD_BLOCK_SIZE,12, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
#endif
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
static void Initialize(
T* h_in,
int numItems)
{
rd::fillRandomDataTable(h_in, numItems, T(0), T(126));
}
/**
* @brief Create if necessary and open log file. Allocate log file stream.
*/
template <typename T>
static void initializeLogFile()
{
if (g_logPerfResults)
{
std::ostringstream logFileName;
// append device name to log file
logFileName << typeid(T).name() << "_" << getCurrDate() << "_" << g_devName << "_"
<< LOG_FILE_NAME_SUFFIX;
std::string logFilePath = rd::findPath("../timings/", logFileName.str());
g_logFile = new std::ofstream(logFilePath.c_str(), std::ios::out | std::ios::app);
if (g_logFile->fail())
{
throw std::logic_error("Couldn't open file: " + logFileName.str());
}
// legend
if (g_startBenchmark)
{
*g_logFile << "% ";
logValue(*g_logFile, "dim", 10);
logValue(*g_logFile, "blockSize", 10);
logValue(*g_logFile, "itmPerThr", 10);
logValue(*g_logFile, "loadModif", 10);
logValue(*g_logFile, "ioBackend", 16);
logValue(*g_logFile, "inMemLout", 10);
logValue(*g_logFile, "twoPhase", 10);
logValue(*g_logFile, "selRatio", 10);
logValue(*g_logFile, "avgMillis", 12);
logValue(*g_logFile, "GBytes", 12);
logValue(*g_logFile, "resUsage", 10);
*g_logFile << "\n";
}
}
}
/**
* Reference selection problem solution.
*/
template <
int DIM,
typename T,
typename SelectOpT>
static int solve(
T const * h_in,
int numPoints,
SelectOpT selectOp)
{
int selectedPointsCnt = 0;
#ifdef RD_USE_OPENMP
#pragma omp parallel for schedule(static) reduction(+:selectedPointsCnt)
#endif
for (int k = 0; k < numPoints; ++k)
{
T const * point = h_in + k * DIM;
if (selectOp(point))
{
selectedPointsCnt++;
}
}
return selectedPointsCnt;
}
/**
* @brief Prepare and run test. Allocate and initialize test input data.
*/
template <
int DIM,
cub::CacheLoadModifier LOAD_MODIFIER,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
bool STORE_TWO_PHASE,
typename T>
void prepareAndRunTest(
T * d_in,
int numPoints,
float selectRatio,
int selectedPointsCnt,
T compareItem)
{
int inDataStride = (INPUT_MEM_LAYOUT == rd::COL_MAJOR) ? numPoints : 1;
int outDataStride = (INPUT_MEM_LAYOUT == rd::COL_MAJOR) ? selectedPointsCnt : 1;
// Run test kernel configurations
iterateKernelConf<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE>(
numPoints, compareItem, d_in, inDataStride, outDataStride, selectRatio,
selectedPointsCnt);
}
template <
int DIM,
cub::CacheLoadModifier LOAD_MODIFIER,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename T>
void testStoreAlgorithm(
T * d_in,
int numPoints,
float selectRatio,
int selectedPointsCnt,
T compareItem)
{
/**
* store two-phase (with selected items compaction in smem)
*/
// prepareAndRunTest<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, true, T>(
// d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
/**
* store using warp-aggregated algorithm
*/
prepareAndRunTest<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, false, T>(
d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
}
template <
int DIM,
rd::gpu::BlockTileIOBackend IO_BACKEND,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename T>
void testLoadModifier(
T * d_in,
int numPoints,
float selectRatio,
int selectedPointsCnt,
T compareItem)
{
testStoreAlgorithm<DIM, cub::LOAD_LDG, IO_BACKEND, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
selectRatio, selectedPointsCnt, compareItem);
// Cache streaming (likely to be accessed once)
testStoreAlgorithm<DIM, cub::LOAD_CG, IO_BACKEND, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
selectRatio, selectedPointsCnt, compareItem);
}
template <
int DIM,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename T>
void testIOBackend(
T * d_in,
int numPoints,
float selectRatio,
int selectedPointsCnt,
T compareItem)
{
testLoadModifier<DIM, rd::gpu::IO_BACKEND_CUB, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
selectRatio, selectedPointsCnt, compareItem);
/**
* 06.06.2016 Trove version causes misalinged address errors while storing data from smem.
*/
// testLoadModifier<DIM, rd::gpu::IO_BACKEND_TROVE, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
// selectRatio, selectedPointsCnt, compareItem);
}
template <
int DIM,
typename T>
void testInputMemLayout(
int numPoints,
float selectRatio)
{
if (g_logPerfResults)
{
*g_logFile << "% pointsNum=" << numPoints << std::endl;
}
// allocate host arrays
T * h_in = new T[numPoints * DIM];
// Initialize input
Initialize(h_in, numPoints * DIM);
// Select a comparison value that is selectRatio through the space of [0,127]
T compareItem;
if (selectRatio <= 0.0)
{
compareItem = 0; // select none
}
else if (selectRatio >= 1.0)
{
compareItem = 127; // select all
}
else
{
compareItem = int(T(T(127) * selectRatio));
}
LessThan<DIM, T> selectOp(compareItem);
int selectedPointsCnt = solve<DIM>(h_in, numPoints, selectOp);
// Allocate device arrays
T * d_in = nullptr;
checkCudaErrors(hipMalloc(&d_in, numPoints * DIM * sizeof(T)));
rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_in, h_in, DIM, numPoints, DIM, DIM);
testIOBackend<DIM, rd::ROW_MAJOR, T>(d_in, numPoints, selectRatio, selectedPointsCnt,
compareItem);
rd::gpu::rdMemcpy<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_in, h_in, DIM, numPoints, numPoints, DIM);
testIOBackend<DIM, rd::COL_MAJOR, T>(d_in, numPoints, selectRatio, selectedPointsCnt,
compareItem);
// cleanup
if (h_in) delete[] h_in;
if (d_in) checkCudaErrors(hipFree(d_in));
}
template <
typename T>
void testDim(
int numPoints,
T selectRatio)
{
if (g_logPerfResults)
{
initializeLogFile<T>();
}
#if defined(RD_DEBUG) || defined(RD_PROFILE) || defined(QUICK_TEST)
std::cout << "\nTest DIM 2...\n\n";
testInputMemLayout<10, T>(numPoints, selectRatio);
#else
std::cout << "\nTest DIM 1...\n\n";
testInputMemLayout<1, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 2...\n\n";
testInputMemLayout<2, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 3...\n\n";
testInputMemLayout<3, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 4...\n\n";
testInputMemLayout<4, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 5...\n\n";
testInputMemLayout<5, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 6...\n\n";
testInputMemLayout<6, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 7...\n\n";
testInputMemLayout<7, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 8...\n\n";
testInputMemLayout<8, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 9...\n\n";
testInputMemLayout<9, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 10...\n\n";
testInputMemLayout<10, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 11...\n\n";
testInputMemLayout<11, T>(numPoints, selectRatio);
std::cout << "\nTest DIM 12...\n\n";
testInputMemLayout<12, T>(numPoints, selectRatio);
#endif
if (g_logPerfResults)
{
if (g_logFile) delete g_logFile;
}
}
int main(int argc, char const **argv)
{
int numPoints = -1;
float selectRatio = -1.f;
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t--np=<number of input points>\n"
"\t\t[--d=<device id>]\n"
"\t\t[--ratio=<selection ratio, default 0.5>]\n"
"\t\t[--log <log performance results>]\n"
"\t\t[--start <mark start of benchmark in log file>]\n"
// "\t\t[--end <mark end of benchmark in log file>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("np", numPoints);
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", g_devId);
}
if (args.CheckCmdLineFlag("ratio"))
{
args.GetCmdLineArgument("ratio", selectRatio);
}
if (args.CheckCmdLineFlag("log"))
{
g_logPerfResults = true;
}
if (args.CheckCmdLineFlag("start"))
{
g_startBenchmark = true;
}
// if (args.CheckCmdLineFlag("end"))
// {
// g_endBenchmark = true;
// }
checkCudaErrors(deviceInit(g_devId));
// set device name for logging and drawing purposes
hipDeviceProp_t devProp;
checkCudaErrors(hipGetDeviceProperties(&devProp, g_devId));
g_devName = devProp.name;
// read device SM count and determine max number of resident blocks per SM
g_devSMCount = devProp.multiProcessorCount;
//-----------------------------------------
// TESTS
//-----------------------------------------
if (numPoints < 0 ||
selectRatio < 0)
{
std::cout << "Have to specify parameters! Rerun with --help for more "
"informations.\n";
exit(1);
}
#ifdef QUICK_TEST
const int DIM = 10;
// allocate host arrays
float * h_in = new float[numPoints * DIM];
// Initialize input
Initialize(h_in, numPoints * DIM);
if (g_logPerfResults)
{
initializeLogFile<float>();
}
// Select a comparison value that is selectRatio through the space of [0,127]
float compareItem;
if (selectRatio <= 0.0)
{
compareItem = 0; // select none
}
else if (selectRatio >= 1.0)
{
compareItem = 127; // select all
}
else
{
compareItem = int(float(float(127) * selectRatio));
}
LessThan<DIM, float> selectOp(compareItem);
int selectedPointsCnt = solve<DIM>(h_in, numPoints, selectOp);
// Allocate device arrays
float * d_in = nullptr;
checkCudaErrors(hipMalloc(&d_in, numPoints * DIM * sizeof(float)));
rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_in, h_in, DIM, numPoints, DIM, DIM);
prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::ROW_MAJOR, true>(
d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::ROW_MAJOR, false>(
d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
rd::gpu::rdMemcpy<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_in, h_in, DIM, numPoints, numPoints, DIM);
prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::COL_MAJOR, true>(
d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::COL_MAJOR, false>(
d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
std::cout << rd::HLINE << std::endl;
// cleanup
if (h_in) delete[] h_in;
if (d_in) checkCudaErrors(hipFree(d_in));
#else
#ifndef RD_TEST_DBL_PREC
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testDim<float>(numPoints, selectRatio);
#else
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE: " << std::endl;
testDim<double>(numPoints, selectRatio);
#endif
std::cout << rd::HLINE << std::endl;
#endif
checkCudaErrors(deviceReset());
std::cout << "END!" << std::endl;
return 0;
}
| 7e8eb3bfa0cbb413826911dc6a29e0cfdde15719.cu | /**
* @file benchmark_lc_select_if_mem_bf.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#define BLOCK_TILE_LOAD_V4 1
#include <helper_cuda.h>
#ifdef RD_PROFILE
# include <cuda_profiler_api.h>
#endif
#include <iostream>
#include <sstream>
#include <fstream>
#include <iomanip>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <cmath>
#include <type_traits>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "rd/utils/rd_params.hpp"
#include "rd/gpu/block/block_select_if.cuh"
#include "rd/gpu/util/dev_samples_set.cuh"
#include "rd/gpu/util/dev_utilities.cuh"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "cub/util_type.cuh"
//------------------------------------------------------------
// GLOBAL CONSTANTS / VARIABLES
//------------------------------------------------------------
static const std::string LOG_FILE_NAME_SUFFIX = "_select_if_mbf-timings.txt";
static std::ofstream * g_logFile = nullptr;
static std::string g_devName = "";
static int g_devSMCount = 0;
static int g_devId = 0;
static bool g_logPerfResults = false;
static bool g_startBenchmark = false;
#if defined(RD_PROFILE) || defined(RD_DEBUG)
static const int g_iterations = 1;
#else
static const int g_iterations = 100;
#endif
//------------------------------------------------------------
// Select Op
//------------------------------------------------------------
template <int DIM, typename T>
struct LessThan
{
T val_;
__host__ __device__ __forceinline__ LessThan(T v)
:
val_(v)
{}
__host__ __device__ __forceinline__ bool operator()(T const * point) const
{
for (int d = 0; d < DIM; ++d)
{
if (point[d] >= val_)
return false;
}
return true;
}
};
//------------------------------------------------------------
// KERNEL
//------------------------------------------------------------
/**
* @tparam STORE_TWO_PHASE Whether or not to perform two phase selected items store
* with items compatcion in shmem. Oherwise uses warp-wide store.
*/
template <
typename BlockSelectIfPolicyT,
int DIM,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename OffsetT,
typename SampleT,
typename SelectOpT,
bool STORE_TWO_PHASE>
__launch_bounds__ (int(BlockSelectIfPolicyT::BLOCK_THREADS))
static __global__ void selectIfKernel(
SampleT const * d_in,
OffsetT numPoints,
SampleT ** d_selectedItemsPtrs,
OffsetT * d_selectedItemsCnt,
SelectOpT selectOp,
OffsetT inStride,
OffsetT outStride)
{
typedef rd::gpu::BlockSelectIf<
BlockSelectIfPolicyT,
DIM,
INPUT_MEM_LAYOUT,
SelectOpT,
SampleT,
OffsetT,
STORE_TWO_PHASE>
BlockSelectIfT;
if (numPoints == 0)
{
return;
}
__shared__ typename BlockSelectIfT::TempStorage tempStorage;
OffsetT selectedPointsCnt = BlockSelectIfT(tempStorage, d_in, d_selectedItemsPtrs[blockIdx.x],
selectOp).scanRange(0, numPoints, inStride, outStride);
if (threadIdx.x == 0)
{
d_selectedItemsCnt[blockIdx.x] = selectedPointsCnt;
}
}
//------------------------------------------------------------
// KERNEL INVOCATION
//------------------------------------------------------------
struct KernelConfig
{
int blockThreads;
int itemsPerThread;
};
/**
 * Launches @p partitionKernelPtr with a grid sized to saturate the device
 * (max occupancy x SM count); every block scans the whole input range.
 *
 * @return cudaSuccess on success, otherwise the first CUDA error hit while
 *         querying occupancy, launching, or (if debugSynchronous) syncing.
 */
template <
    typename OffsetT,
    typename SampleT,
    typename SelectOpT,
    typename PartitionKernelPtrT>
static cudaError_t invoke(
    SampleT const *         d_in,
    OffsetT                 numPoints,
    SampleT **              d_selectedItemsPtrs,
    OffsetT *               d_selectedItemsCnt,
    OffsetT                 inStride,
    OffsetT                 outStride,
    SelectOpT               selectOp,
    cudaStream_t            stream,
    bool                    debugSynchronous,
    PartitionKernelPtrT     partitionKernelPtr,
    KernelConfig            kernelConfig)
{
    cudaError error = cudaSuccess;
    do
    {
        // get SM occupancy
        int smOccupancy;
        // BUGFIX: the occupancy status was previously not assigned to
        // `error`, so a failure here silently returned cudaSuccess.
        if (CubDebug(error = cub::MaxSmOccupancy(
                smOccupancy,
                partitionKernelPtr,
                kernelConfig.blockThreads)))
        {
            break;
        }
        // One block per resident slot on the device.
        dim3 partitionGridSize(1);
        partitionGridSize.x = smOccupancy * g_devSMCount;
        if (debugSynchronous)
        {
            // Cast stream to void* for %p and numPoints to int for %d to
            // keep the varargs well-defined regardless of OffsetT.
            printf("Invoking selectIfKernel<<<%d, %d, 0, %p>>> numPoints: %d, "
                "pointsPerThread: %d\n",
                partitionGridSize.x, kernelConfig.blockThreads, (void*)stream,
                (int)numPoints, kernelConfig.itemsPerThread);
        }
        partitionKernelPtr<<<partitionGridSize, kernelConfig.blockThreads, 0, stream>>>(
            d_in,
            numPoints,
            d_selectedItemsPtrs,
            d_selectedItemsCnt,
            selectOp,
            inStride,
            outStride);
        // Check for failure to launch
        if (CubDebug(error = cudaPeekAtLastError())) break;
        // Sync the stream if specified to flush runtime errors
        if (debugSynchronous && (CubDebug(error = cub::SyncStream(stream)))) break;
    } while (0);
    return error;
}
//------------------------------------------------------------
// KERNEL DISPATCH
//------------------------------------------------------------
/**
 * Instantiates the selection kernel for the given compile-time policy,
 * configures its cache / shared-memory preferences, and launches it
 * @p iterations times via invoke() (each launch scans the whole input).
 */
template <
    int BLOCK_THREADS,
    int POINTS_PER_THREAD,
    cub::CacheLoadModifier LOAD_MODIFIER,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    int DIM,
    bool STORE_TWO_PHASE,
    typename SelectOpT,
    typename OffsetT,
    typename T>
static void dispatchKernel(
    T const * d_in,                  // device input points
    OffsetT numPoints,               // number of input points
    T ** d_selectedItemsPtrs,        // per-block device output buffers
    OffsetT * d_selectedItemsCnt,    // per-block selected-point counters
    SelectOpT selectOp,              // selection predicate
    OffsetT inStride,
    OffsetT outStride,
    int iterations,                  // number of timed launches
    bool debugSynchronous = false)
{
    typedef rd::gpu::BlockSelectIfPolicy<
        BLOCK_THREADS,
        POINTS_PER_THREAD,
        LOAD_MODIFIER,
        IO_BACKEND>
    BlockSelectIfPolicyT;

    KernelConfig partitionConfig;
    partitionConfig.blockThreads = BLOCK_THREADS;
    partitionConfig.itemsPerThread = POINTS_PER_THREAD;

    auto partitionKernelPtr = selectIfKernel<BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT,
        OffsetT, T, SelectOpT, STORE_TWO_PHASE>;

    // If we use two-phase store algorithm, which compact's selections in smem, we prefer
    // larger smem to L1 cache size.
    if (STORE_TWO_PHASE)
    {
        // set smem/L1 mem configuration
        // * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
        // * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
        // * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
        // * - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory
        cudaFuncSetCacheConfig(partitionKernelPtr, cudaFuncCachePreferShared);
        // 8-byte samples: widen the shared memory banks to avoid conflicts.
        if (sizeof(T) == 8)
        {
            // * - ::cudaSharedMemBankSizeDefault: use the device's shared memory
            // configuration when launching this function.
            // * - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be
            // four bytes natively when launching this function.
            // * - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be
            // eight bytes natively when launching this function.
            cudaFuncSetSharedMemConfig(partitionKernelPtr, cudaSharedMemBankSizeEightByte);
        }
    }
    else
    {
        // Warp-aggregated store uses little smem; favor L1 instead.
        cudaFuncSetCacheConfig(partitionKernelPtr, cudaFuncCachePreferL1);
    }

    // NOTE(review): launches go to the default stream (0).
    for (int i = 0; i < iterations; ++i)
    {
        CubDebugExit(invoke(
            d_in,
            numPoints,
            d_selectedItemsPtrs,
            d_selectedItemsCnt,
            inStride,
            outStride,
            selectOp,
            0,
            debugSynchronous,
            partitionKernelPtr,
            partitionConfig));
    }
}
//------------------------------------------------------------
// Test and benchmark specified kernel configuration
//------------------------------------------------------------
/**
 * Benchmarks one compile-time configuration of BlockSelectIf.
 * The benchmark body is only instantiated when the kernel's shared-memory
 * footprint fits in 48 kB (EnableKernelT tag dispatch); otherwise doImpl
 * is a no-op so over-sized configurations still compile.
 */
template <
    int BLOCK_THREADS,
    int POINTS_PER_THREAD,
    int DIM,
    cub::CacheLoadModifier LOAD_MODIFIER,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    bool STORE_TWO_PHASE,
    typename T>
struct RunSelectIf
{
    typedef LessThan<DIM, T> SelectOpT;
    typedef rd::gpu::BlockSelectIfPolicy<
        BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND>
    BlockSelectIfPolicyT;
    typedef rd::gpu::BlockSelectIf<
        BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT, SelectOpT, T, int, STORE_TWO_PHASE>
    BlockSelectIfT;
    typedef typename BlockSelectIfT::TempStorage KernelTempStorageT;
    // Compile-time gate: 0xc000 == 48 kB, the classic per-block smem limit.
    typedef typename cub::If<sizeof(KernelTempStorageT) <= 0xc000,
        std::true_type, std::false_type>::Type EnableKernelT;

    // Entry point: forwards to the enabled/disabled doImpl overload.
    static void impl(
        int numPoints,
        float compare,
        T const * d_in,
        int inStride,
        int outStride,
        T selectRatio,
        int selectedPointsCnt)
    {
        doImpl<EnableKernelT>(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    }

    /**
     * Enabled overload: allocates one output buffer per resident block,
     * runs a warm-up pass, times g_iterations launches and logs the
     * achieved bandwidth together with kernel resource usage.
     */
    template <
        typename EnableT,
        typename std::enable_if<
            std::is_same<EnableT, std::true_type>::value, int>::type * = nullptr>
    static void doImpl(
        int numPoints,
        float compare,
        T const * d_in,
        int inStride,
        int outStride,
        T selectRatio,
        int selectedPointsCnt)
    {
        SelectOpT selectOp(compare);
        auto kernelPtr = selectIfKernel<BlockSelectIfPolicyT, DIM, INPUT_MEM_LAYOUT, int, T,
            SelectOpT, STORE_TWO_PHASE>;
        // get SM occupancy
        int smOccupancy;
        checkCudaErrors(cub::MaxSmOccupancy(smOccupancy, kernelPtr, BLOCK_THREADS));
        int blockCount = 1;
        // One block per resident slot; every block scans the entire input.
        blockCount = smOccupancy * g_devSMCount;

        // Allocate device arrays
        T ** d_selectedPointsPtrs = nullptr;
        T ** h_dSelectedPointsPtrs = new T*[blockCount];
        int * d_selectedPointsCnt = nullptr;
        checkCudaErrors(cudaMalloc(&d_selectedPointsPtrs, blockCount * sizeof(T*)));
        checkCudaErrors(cudaMalloc(&d_selectedPointsCnt, blockCount * sizeof(int)));

        // Non-blocking aux stream lets the memsets overlap the mallocs below.
        cudaStream_t auxStream;
        checkCudaErrors(cudaStreamCreateWithFlags(&auxStream, cudaStreamNonBlocking));
        checkCudaErrors(cudaMemsetAsync(d_selectedPointsCnt, 0, blockCount * sizeof(int),
            auxStream));
        // Each block gets a buffer sized for all expected selections.
        for (int k = 0; k < blockCount; ++k)
        {
            checkCudaErrors(cudaMalloc(h_dSelectedPointsPtrs + k,
                selectedPointsCnt * DIM * sizeof(T)));
            checkCudaErrors(cudaMemsetAsync(h_dSelectedPointsPtrs[k], 0,
                selectedPointsCnt * DIM * sizeof(T), auxStream));
        }

        // Initialize device input
        checkCudaErrors(cudaMemcpy(d_selectedPointsPtrs, h_dSelectedPointsPtrs,
            blockCount * sizeof(T*), cudaMemcpyHostToDevice));
        checkCudaErrors(cudaDeviceSynchronize());

        // Run warm-up/correctness iteration
        dispatchKernel<BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT,
            DIM, STORE_TWO_PHASE>(
            d_in, numPoints, d_selectedPointsPtrs, d_selectedPointsCnt, selectOp, inStride,
            outStride, 1);
        checkCudaErrors(cudaGetLastError());
        checkCudaErrors(cudaDeviceSynchronize());

        // Measure performance
        GpuTimer timer;
        float elapsedMillis;
    #ifdef RD_PROFILE
        cudaProfilerStart();
    #endif
        timer.Start();
        dispatchKernel<BLOCK_THREADS, POINTS_PER_THREAD, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT,
            DIM, STORE_TWO_PHASE>(
            d_in, numPoints, d_selectedPointsPtrs, d_selectedPointsCnt, selectOp, inStride,
            outStride, g_iterations);
        timer.Stop();
        elapsedMillis = timer.ElapsedMillis();
    #ifdef RD_PROFILE
        cudaProfilerStop();
    #endif
        float avgMillis = elapsedMillis / g_iterations;
        int selItemsCnt = selectedPointsCnt * DIM * blockCount;
        // every block scans entire data set
        unsigned long long int numBytes = sizeof(T) * blockCount * numPoints * DIM +
            // storing selectedItems counters
            blockCount * sizeof(int) +
            // storing selected items
            selItemsCnt * sizeof(T);
        // conversion to GB/s (bytes per ms divided by 1e6 equals GB/s)
        float gigaBandwidth = float(numBytes) / avgMillis / 1000.0f / 1000.0f;

        KernelResourceUsage resUsage(kernelPtr, BLOCK_THREADS);
        if (g_logPerfResults)
        {
            logValues(*g_logFile, DIM, BLOCK_THREADS, POINTS_PER_THREAD,
                std::string(rd::LoadModifierNameTraits<LOAD_MODIFIER>::name),
                std::string(rd::BlockTileIONameTraits<IO_BACKEND>::name),
                std::string(rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name),
                (STORE_TWO_PHASE) ? "true" : "false",
                selectRatio, avgMillis, gigaBandwidth,
                resUsage.prettyPrint());
            *g_logFile << std::endl;
        }
        logValues(std::cout, DIM, BLOCK_THREADS, POINTS_PER_THREAD,
            std::string(rd::LoadModifierNameTraits<LOAD_MODIFIER>::name),
            std::string(rd::BlockTileIONameTraits<IO_BACKEND>::name),
            std::string(rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name),
            (STORE_TWO_PHASE) ? "true" : "false",
            selectRatio, avgMillis, gigaBandwidth,
            resUsage.prettyPrint());
        std::cout << std::endl;

        // cleanup
        checkCudaErrors(cudaStreamDestroy(auxStream));
        for (int k = 0; k < blockCount; ++k)
        {
            if (h_dSelectedPointsPtrs[k]) checkCudaErrors(cudaFree(h_dSelectedPointsPtrs[k]));
        }
        if (d_selectedPointsPtrs) checkCudaErrors(cudaFree(d_selectedPointsPtrs));
        if (d_selectedPointsCnt) checkCudaErrors(cudaFree(d_selectedPointsCnt));
        if (h_dSelectedPointsPtrs) delete[] h_dSelectedPointsPtrs;
    }

    // Disabled overload: kernel needs too much shared memory — do nothing.
    template <
        typename EnableT,
        typename std::enable_if<
            std::is_same<EnableT, std::false_type>::value, int>::type * = nullptr>
    static void doImpl(
        int ,
        float ,
        T const * ,
        int ,
        int ,
        T ,
        int )
    {
    }
};
//------------------------------------------------------------
// Test kernel block-threads / items-per-thread configurations
//------------------------------------------------------------
/*
* Test different kernel configurations (block size, points per thread)
*/
/*
 * Test different kernel configurations (block size, points per thread).
 * QUICK_TEST builds run only 4 points/thread; full builds sweep 1..12.
 */
template <
    int DIM,
    cub::CacheLoadModifier LOAD_MODIFIER,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    bool STORE_TWO_PHASE,
    typename T>
void iterateKernelConf(
    int numPoints,
    float compare,
    T const * d_in,
    int inStride,
    int outStride,
    T selectRatio,
    int selectedPointsCnt)
{
#ifdef QUICK_TEST
    RunSelectIf<RD_BLOCK_SIZE, 4, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
#else
    // Sweep POINTS_PER_THREAD = 1..12 at the build-time RD_BLOCK_SIZE.
    RunSelectIf<RD_BLOCK_SIZE, 1, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 2, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 3, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 4, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 5, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 6, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 7, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 8, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE, 9, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE,10, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE,11, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
    RunSelectIf<RD_BLOCK_SIZE,12, DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE,
        T>::impl(numPoints, compare, d_in, inStride, outStride, selectRatio, selectedPointsCnt);
#endif
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
/**
 * Initialize problem: fill h_in with numItems random samples
 * (presumably uniform in [0, 126] — semantics of fillRandomDataTable;
 * confirm against its definition).
 */
template <typename T>
static void Initialize(
    T* h_in,
    int numItems)
{
    rd::fillRandomDataTable(h_in, numItems, T(0), T(126));
}
/**
* @brief Create if necessary and open log file. Allocate log file stream.
*/
/**
 * @brief Create if necessary and open log file. Allocate log file stream.
 *
 * The stream is stored in the global g_logFile (released in testDim).
 * Throws std::logic_error if the file cannot be opened.
 */
template <typename T>
static void initializeLogFile()
{
    if (g_logPerfResults)
    {
        std::ostringstream logFileName;
        // append device name to log file
        logFileName << typeid(T).name() << "_" << getCurrDate() << "_" << g_devName << "_"
            << LOG_FILE_NAME_SUFFIX;
        std::string logFilePath = rd::findPath("../timings/", logFileName.str());
        // Open in append mode so successive runs accumulate in one file.
        g_logFile = new std::ofstream(logFilePath.c_str(), std::ios::out | std::ios::app);
        if (g_logFile->fail())
        {
            throw std::logic_error("Couldn't open file: " + logFileName.str());
        }
        // legend
        if (g_startBenchmark)
        {
            *g_logFile << "% ";
            logValue(*g_logFile, "dim", 10);
            logValue(*g_logFile, "blockSize", 10);
            logValue(*g_logFile, "itmPerThr", 10);
            logValue(*g_logFile, "loadModif", 10);
            logValue(*g_logFile, "ioBackend", 16);
            logValue(*g_logFile, "inMemLout", 10);
            logValue(*g_logFile, "twoPhase", 10);
            logValue(*g_logFile, "selRatio", 10);
            logValue(*g_logFile, "avgMillis", 12);
            logValue(*g_logFile, "GBytes", 12);
            logValue(*g_logFile, "resUsage", 10);
            *g_logFile << "\n";
        }
    }
}
/**
* Reference selection problem solution.
*/
/**
 * Reference (CPU) selection solution: counts the points of h_in
 * (row-major, DIM components each) accepted by selectOp.
 * Parallelized with OpenMP when RD_USE_OPENMP is defined.
 */
template <
    int DIM,
    typename T,
    typename SelectOpT>
static int solve(
    T const * h_in,
    int numPoints,
    SelectOpT selectOp)
{
    int acceptedCnt = 0;
#ifdef RD_USE_OPENMP
#pragma omp parallel for schedule(static) reduction(+:acceptedCnt)
#endif
    for (int idx = 0; idx < numPoints; ++idx)
    {
        // Point idx starts at component offset idx * DIM.
        acceptedCnt += selectOp(h_in + idx * DIM) ? 1 : 0;
    }
    return acceptedCnt;
}
/**
* @brief Prepare and run test. Allocate and initialize test input data.
*/
/**
 * @brief Derives the component strides implied by INPUT_MEM_LAYOUT and
 * runs every tested kernel configuration on the prepared device data.
 *
 * In COL_MAJOR layout consecutive components of a point lie a full column
 * apart (numPoints for the input, selectedPointsCnt for the output);
 * ROW_MAJOR packs a point's components contiguously (stride 1).
 */
template <
    int DIM,
    cub::CacheLoadModifier LOAD_MODIFIER,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    bool STORE_TWO_PHASE,
    typename T>
void prepareAndRunTest(
    T * d_in,
    int numPoints,
    float selectRatio,
    int selectedPointsCnt,
    T compareItem)
{
    int inDataStride = 1;
    int outDataStride = 1;
    if (INPUT_MEM_LAYOUT == rd::COL_MAJOR)
    {
        inDataStride = numPoints;
        outDataStride = selectedPointsCnt;
    }
    // Run test kernel configurations
    iterateKernelConf<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, STORE_TWO_PHASE>(
        numPoints, compareItem, d_in, inDataStride, outDataStride, selectRatio,
        selectedPointsCnt);
}
/**
 * Benchmarks the store-algorithm variants for one load-modifier /
 * backend / layout combination. The two-phase (smem compaction) variant
 * is currently disabled (see commented call below).
 */
template <
    int DIM,
    cub::CacheLoadModifier LOAD_MODIFIER,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    typename T>
void testStoreAlgorithm(
    T * d_in,
    int numPoints,
    float selectRatio,
    int selectedPointsCnt,
    T compareItem)
{
    /**
     * store two-phase (with selected items compaction in smem)
     */
    // prepareAndRunTest<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, true, T>(
    //     d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
    /**
     * store using warp-aggregated algorithm
     */
    prepareAndRunTest<DIM, LOAD_MODIFIER, IO_BACKEND, INPUT_MEM_LAYOUT, false, T>(
        d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
}
/**
 * Benchmarks both tested cache load modifiers (cub::LOAD_LDG read-only
 * cache path and cub::LOAD_CG streaming) for the given backend/layout.
 */
template <
    int DIM,
    rd::gpu::BlockTileIOBackend IO_BACKEND,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    typename T>
void testLoadModifier(
    T * d_in,
    int numPoints,
    float selectRatio,
    int selectedPointsCnt,
    T compareItem)
{
    // Cached load via the read-only data (LDG) path.
    testStoreAlgorithm<DIM, cub::LOAD_LDG, IO_BACKEND, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
        selectRatio, selectedPointsCnt, compareItem);
    // Cache streaming (likely to be accessed once)
    testStoreAlgorithm<DIM, cub::LOAD_CG, IO_BACKEND, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
        selectRatio, selectedPointsCnt, compareItem);
}
/**
 * Benchmarks the tile I/O backends; only the CUB backend is active
 * (the trove backend is disabled — see note below).
 */
template <
    int DIM,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    typename T>
void testIOBackend(
    T * d_in,
    int numPoints,
    float selectRatio,
    int selectedPointsCnt,
    T compareItem)
{
    testLoadModifier<DIM, rd::gpu::IO_BACKEND_CUB, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
        selectRatio, selectedPointsCnt, compareItem);
    /**
     * 06.06.2016 Trove version causes misalinged address errors while storing data from smem.
     */
    // testLoadModifier<DIM, rd::gpu::IO_BACKEND_TROVE, INPUT_MEM_LAYOUT, T>(d_in, numPoints,
    //     selectRatio, selectedPointsCnt, compareItem);
}
/**
 * Allocates and initializes host/device input for one dimensionality,
 * computes the CPU reference selection count, then benchmarks both
 * ROW_MAJOR and COL_MAJOR device layouts of the same data.
 */
template <
    int DIM,
    typename T>
void testInputMemLayout(
    int numPoints,
    float selectRatio)
{
    if (g_logPerfResults)
    {
        *g_logFile << "% pointsNum=" << numPoints << std::endl;
    }
    // allocate host arrays
    T * h_in = new T[numPoints * DIM];

    // Initialize input
    Initialize(h_in, numPoints * DIM);
    // Select a comparison value that is selectRatio through the space of [0,127]
    T compareItem;
    if (selectRatio <= 0.0)
    {
        compareItem = 0;        // select none
    }
    else if (selectRatio >= 1.0)
    {
        compareItem = 127;      // select all
    }
    else
    {
        compareItem = int(T(T(127) * selectRatio));
    }

    LessThan<DIM, T> selectOp(compareItem);
    // CPU reference count — also used to size the device output buffers.
    int selectedPointsCnt = solve<DIM>(h_in, numPoints, selectOp);

    // Allocate device arrays
    T * d_in = nullptr;
    checkCudaErrors(cudaMalloc(&d_in, numPoints * DIM * sizeof(T)));

    // Upload in ROW_MAJOR layout and benchmark it.
    rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
        d_in, h_in, DIM, numPoints, DIM, DIM);
    testIOBackend<DIM, rd::ROW_MAJOR, T>(d_in, numPoints, selectRatio, selectedPointsCnt,
        compareItem);
    // Re-upload the same data transposed to COL_MAJOR and benchmark again.
    rd::gpu::rdMemcpy<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
        d_in, h_in, DIM, numPoints, numPoints, DIM);
    testIOBackend<DIM, rd::COL_MAJOR, T>(d_in, numPoints, selectRatio, selectedPointsCnt,
        compareItem);

    // cleanup
    if (h_in) delete[] h_in;
    if (d_in) checkCudaErrors(cudaFree(d_in));
}
/**
 * Runs the benchmark for every tested dimensionality (1..12 in full
 * builds, only DIM 10 in debug/profile/quick builds) and manages the
 * global performance-log stream.
 */
template <
    typename T>
void testDim(
    int numPoints,
    T selectRatio)
{
    if (g_logPerfResults)
    {
        initializeLogFile<T>();
    }
#if defined(RD_DEBUG) || defined(RD_PROFILE) || defined(QUICK_TEST)
    // BUGFIX: the message said "Test DIM 2" while instantiating DIM == 10.
    std::cout << "\nTest DIM 10...\n\n";
    testInputMemLayout<10, T>(numPoints, selectRatio);
#else
    std::cout << "\nTest DIM 1...\n\n";
    testInputMemLayout<1, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 2...\n\n";
    testInputMemLayout<2, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 3...\n\n";
    testInputMemLayout<3, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 4...\n\n";
    testInputMemLayout<4, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 5...\n\n";
    testInputMemLayout<5, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 6...\n\n";
    testInputMemLayout<6, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 7...\n\n";
    testInputMemLayout<7, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 8...\n\n";
    testInputMemLayout<8, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 9...\n\n";
    testInputMemLayout<9, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 10...\n\n";
    testInputMemLayout<10, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 11...\n\n";
    testInputMemLayout<11, T>(numPoints, selectRatio);
    std::cout << "\nTest DIM 12...\n\n";
    testInputMemLayout<12, T>(numPoints, selectRatio);
#endif
    if (g_logPerfResults)
    {
        if (g_logFile)
        {
            // Release (and flush) the log stream; null the global so a
            // later call cannot double-delete or write to a dangling stream.
            delete g_logFile;
            g_logFile = nullptr;
        }
    }
}
/**
 * Benchmark driver: parses command-line flags, initializes the device,
 * and either runs a single QUICK_TEST configuration (DIM 10, float) or
 * the full float/double dimensionality sweep via testDim().
 */
int main(int argc, char const **argv)
{
    int numPoints       = -1;       // sentinel: must be set via --np
    float selectRatio   = -1.f;     // sentinel: must be set via --ratio

    // Initialize command line
    rd::CommandLineArgs args(argc, argv);
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s \n"
            "\t\t--np=<number of input points>\n"
            "\t\t[--d=<device id>]\n"
            "\t\t[--ratio=<selection ratio, default 0.5>]\n"
            "\t\t[--log <log performance results>]\n"
            "\t\t[--start <mark start of benchmark in log file>]\n"
            // "\t\t[--end <mark end of benchmark in log file>]\n"
            "\n", argv[0]);
        exit(0);
    }

    args.GetCmdLineArgument("np", numPoints);
    if (args.CheckCmdLineFlag("d"))
    {
        args.GetCmdLineArgument("d", g_devId);
    }
    if (args.CheckCmdLineFlag("ratio"))
    {
        args.GetCmdLineArgument("ratio", selectRatio);
    }
    if (args.CheckCmdLineFlag("log"))
    {
        g_logPerfResults = true;
    }
    if (args.CheckCmdLineFlag("start"))
    {
        g_startBenchmark = true;
    }
    // if (args.CheckCmdLineFlag("end"))
    // {
    //     g_endBenchmark = true;
    // }

    checkCudaErrors(deviceInit(g_devId));

    // set device name for logging and drawing purposes
    cudaDeviceProp devProp;
    checkCudaErrors(cudaGetDeviceProperties(&devProp, g_devId));
    g_devName = devProp.name;
    // read device SM count and determine max number of resident blocks per SM
    g_devSMCount = devProp.multiProcessorCount;

    //-----------------------------------------
    //  TESTS
    //-----------------------------------------

    // Both --np and --ratio are mandatory.
    if (numPoints < 0 ||
        selectRatio < 0)
    {
        std::cout << "Have to specify parameters! Rerun with --help for more "
            "informations.\n";
        exit(1);
    }
#ifdef QUICK_TEST
    const int DIM = 10;

    // allocate host arrays
    float * h_in = new float[numPoints * DIM];
    // Initialize input
    Initialize(h_in, numPoints * DIM);
    if (g_logPerfResults)
    {
        initializeLogFile<float>();
    }
    // Select a comparison value that is selectRatio through the space of [0,127]
    float compareItem;
    if (selectRatio <= 0.0)
    {
        compareItem = 0;        // select none
    }
    else if (selectRatio >= 1.0)
    {
        compareItem = 127;      // select all
    }
    else
    {
        compareItem = int(float(float(127) * selectRatio));
    }

    LessThan<DIM, float> selectOp(compareItem);
    // CPU reference count, used to size the device output buffers.
    int selectedPointsCnt = solve<DIM>(h_in, numPoints, selectOp);

    // Allocate device arrays
    float * d_in = nullptr;
    checkCudaErrors(cudaMalloc(&d_in, numPoints * DIM * sizeof(float)));

    // ROW_MAJOR upload: both store algorithms.
    rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
        d_in, h_in, DIM, numPoints, DIM, DIM);
    prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::ROW_MAJOR, true>(
        d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
    prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::ROW_MAJOR, false>(
        d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
    // COL_MAJOR upload (transposed): both store algorithms.
    rd::gpu::rdMemcpy<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
        d_in, h_in, DIM, numPoints, numPoints, DIM);
    prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::COL_MAJOR, true>(
        d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);
    prepareAndRunTest<DIM, cub::LOAD_LDG, rd::gpu::IO_BACKEND_CUB, rd::COL_MAJOR, false>(
        d_in, numPoints, selectRatio, selectedPointsCnt, compareItem);

    std::cout << rd::HLINE << std::endl;

    // cleanup
    if (h_in) delete[] h_in;
    if (d_in) checkCudaErrors(cudaFree(d_in));
#else
    #ifndef RD_TEST_DBL_PREC
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT: " << std::endl;
    testDim<float>(numPoints, selectRatio);
    #else
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE: " << std::endl;
    testDim<double>(numPoints, selectRatio);
    #endif
    std::cout << rd::HLINE << std::endl;
#endif

    checkCudaErrors(deviceReset());
    std::cout << "END!" << std::endl;
    return 0;
}
|
3a8a41a19288d4dbb2baae1ad84a43991ef7b6f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hipcub/hipcub.hpp>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/prefixScan.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
using namespace cms::cuda;
// Maps the scan element type to the printf format string used in failure
// messages (integral types by default; see float specialization below).
template <typename T>
struct format_traits {
public:
  static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
// float elements need %f conversions for the two scanned values.
template <>
struct format_traits<float> {
public:
  static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
// Scans a block of ones both out-of-place (c -> co) and in-place (c) with
// blockPrefixScan, then verifies every inclusive-prefix element equals i+1
// and that both scan variants agree. Requires size <= 1024 (fixed shared
// buffers). One block only; asserts trap on failure at the next sync.
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
  assert(size <= 1024);
  __shared__ T ws[32];    // warp-level scratch required by blockPrefixScan
  __shared__ T c[1024];
  __shared__ T co[1024];

  auto first = threadIdx.x;
  for (auto i = first; i < size; i += blockDim.x)
    c[i] = 1;
  __syncthreads();

  blockPrefixScan(c, co, size, ws);  // out-of-place
  blockPrefixScan(c, size, ws);      // in-place

  assert(1 == c[0]);
  assert(1 == co[0]);
  for (auto i = first + 1; i < size; i += blockDim.x) {
    if (c[i] != c[i - 1] + 1)
      printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
    assert(c[i] == c[i - 1] + 1);
    assert(c[i] == i + 1);
    // BUGFIX: was `assert(c[i] = co[i])` — an assignment that always passes
    // for nonzero values and clobbers c[i]; compare instead.
    assert(c[i] == co[i]);
  }
}
// Warp-level scan test: expects a 32-thread launch; scans ones both
// out-of-place (c -> co) and in-place (c) with warpPrefixScan and checks
// the inclusive prefix equals i+1 in both results. Requires size <= 32.
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
  assert(size <= 32);
  __shared__ T c[1024];
  __shared__ T co[1024];
  auto i = threadIdx.x;
  c[i] = 1;
  __syncthreads();

  warpPrefixScan(c, co, i, 0xffffffff);  // out-of-place, full-warp mask
  warpPrefixScan(c, i, 0xffffffff);      // in-place
  __syncthreads();

  assert(1 == c[0]);
  assert(1 == co[0]);
  if (i != 0) {
    if (c[i] != c[i - 1] + 1)
      printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
    assert(c[i] == c[i - 1] + 1);
    assert(c[i] == i + 1);
    // BUGFIX: was `assert(c[i] = co[i])` — an assignment, not a comparison.
    assert(c[i] == co[i]);
  }
}
// Fills v[0..n) with val; global thread 0 logs the launch.
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
  auto idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx == 0)
    printf("init\n");
  if (idx < n)
    v[idx] = val;
}
// Checks that v holds the inclusive prefix sum of ones: v[i] == i + 1.
// Global thread 0 logs the launch.
__global__ void verify(uint32_t const *v, uint32_t n) {
  auto idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx == 0)
    printf("verify\n");
  if (idx < n)
    assert(v[idx] == idx + 1);
}
/**
 * prefixScan test driver (HIP build): exercises warp-, block- and
 * multi-block scans, then cross-checks against hipcub::DeviceScan.
 * Kernel asserts abort the process on failure.
 */
int main() {
  cms::cudatest::requireDevices();

  std::cout << "warp level" << std::endl;
  // std::cout << "warp 32" << std::endl;
  hipLaunchKernelGGL((testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 32);
  hipDeviceSynchronize();
  // std::cout << "warp 16" << std::endl;
  hipLaunchKernelGGL((testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 16);
  hipDeviceSynchronize();
  // std::cout << "warp 5" << std::endl;
  hipLaunchKernelGGL((testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 5);
  hipDeviceSynchronize();

  std::cout << "block level" << std::endl;
  // Sweep every block size and every element count up to the shared limit.
  for (int bs = 32; bs <= 1024; bs += 32) {
    for (int j = 1; j <= 1024; ++j) {
      hipLaunchKernelGGL((testPrefixScan<uint16_t>), dim3(1), dim3(bs), 0, 0, j);
      hipDeviceSynchronize();
      hipLaunchKernelGGL((testPrefixScan<float>), dim3(1), dim3(bs), 0, 0, j);
      hipDeviceSynchronize();
    }
  }
  hipDeviceSynchronize();

  int num_items = 200;
  for (int ksize = 1; ksize < 4; ++ksize) {
    // test multiblock
    std::cout << "multiblok" << std::endl;
    // Declare, allocate, and initialize device-accessible pointers for input and output
    num_items *= 10;
    uint32_t *d_in;
    uint32_t *d_out1;
    uint32_t *d_out2;
    cudaCheck(hipMalloc(&d_in, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out1, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out2, num_items * sizeof(uint32_t)));

    auto nthreads = 256;
    auto nblocks = (num_items + nthreads - 1) / nthreads;
    hipLaunchKernelGGL((init), dim3(nblocks), dim3(nthreads), 0, 0, d_in, 1, num_items);

    // the block counter
    int32_t *d_pc;
    cudaCheck(hipMalloc(&d_pc, sizeof(int32_t)));
    // BUGFIX: sizeof(int32_t) instead of the magic constant 4.
    cudaCheck(hipMemset(d_pc, 0, sizeof(int32_t)));

    nthreads = 1024;
    nblocks = (num_items + nthreads - 1) / nthreads;
    hipLaunchKernelGGL((multiBlockPrefixScan), dim3(nblocks), dim3(nthreads), 0, 0, d_in, d_out1, num_items, d_pc);
    hipLaunchKernelGGL((verify), dim3(nblocks), dim3(nthreads), 0, 0, d_out1, num_items);
    hipDeviceSynchronize();

    // test cub
    std::cout << "cub" << std::endl;
    // Determine temporary device storage requirements for inclusive prefix sum
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // BUGFIX: check the sizing query's status (was silently ignored).
    CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out2, num_items));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    // Allocate temporary storage for inclusive prefix sum
    // fake larger ws already available
    temp_storage_bytes *= 8;
    cudaCheck(hipMalloc(&d_temp_storage, temp_storage_bytes));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    // Run inclusive prefix sum
    CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out2, num_items));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    hipLaunchKernelGGL((verify), dim3(nblocks), dim3(nthreads), 0, 0, d_out2, num_items);
    hipDeviceSynchronize();

    // BUGFIX: release per-iteration device allocations; they were
    // previously leaked on every pass of the ksize loop.
    cudaCheck(hipFree(d_temp_storage));
    cudaCheck(hipFree(d_pc));
    cudaCheck(hipFree(d_out2));
    cudaCheck(hipFree(d_out1));
    cudaCheck(hipFree(d_in));
  }  // ksize
  return 0;
}
| 3a8a41a19288d4dbb2baae1ad84a43991ef7b6f4.cu | #include <iostream>
#include <cub/cub.cuh>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/prefixScan.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
using namespace cms::cuda;
// Maps the scan element type to the printf format string used in failure
// messages (integral types by default; see float specialization below).
template <typename T>
struct format_traits {
public:
  static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
// float elements need %f conversions for the two scanned values.
template <>
struct format_traits<float> {
public:
  static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
// Scans a block of ones both out-of-place (c -> co) and in-place (c) with
// blockPrefixScan, then verifies every inclusive-prefix element equals i+1
// and that both scan variants agree. Requires size <= 1024 (fixed shared
// buffers). One block only; asserts trap on failure at the next sync.
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
  assert(size <= 1024);
  __shared__ T ws[32];    // warp-level scratch required by blockPrefixScan
  __shared__ T c[1024];
  __shared__ T co[1024];

  auto first = threadIdx.x;
  for (auto i = first; i < size; i += blockDim.x)
    c[i] = 1;
  __syncthreads();

  blockPrefixScan(c, co, size, ws);  // out-of-place
  blockPrefixScan(c, size, ws);      // in-place

  assert(1 == c[0]);
  assert(1 == co[0]);
  for (auto i = first + 1; i < size; i += blockDim.x) {
    if (c[i] != c[i - 1] + 1)
      printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
    assert(c[i] == c[i - 1] + 1);
    assert(c[i] == i + 1);
    // BUGFIX: was `assert(c[i] = co[i])` — an assignment that always passes
    // for nonzero values and clobbers c[i]; compare instead.
    assert(c[i] == co[i]);
  }
}
// Warp-level scan test: expects a 32-thread launch; scans ones both
// out-of-place (c -> co) and in-place (c) with warpPrefixScan and checks
// the inclusive prefix equals i+1 in both results. Requires size <= 32.
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
  assert(size <= 32);
  __shared__ T c[1024];
  __shared__ T co[1024];
  auto i = threadIdx.x;
  c[i] = 1;
  __syncthreads();

  warpPrefixScan(c, co, i, 0xffffffff);  // out-of-place, full-warp mask
  warpPrefixScan(c, i, 0xffffffff);      // in-place
  __syncthreads();

  assert(1 == c[0]);
  assert(1 == co[0]);
  if (i != 0) {
    if (c[i] != c[i - 1] + 1)
      printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
    assert(c[i] == c[i - 1] + 1);
    assert(c[i] == i + 1);
    // BUGFIX: was `assert(c[i] = co[i])` — an assignment, not a comparison.
    assert(c[i] == co[i]);
  }
}
// Fills v[0..n) with val; global thread 0 logs the launch.
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
  auto idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx == 0)
    printf("init\n");
  if (idx < n)
    v[idx] = val;
}
// Checks that v holds the inclusive prefix sum of ones: v[i] == i + 1.
// Global thread 0 logs the launch.
__global__ void verify(uint32_t const *v, uint32_t n) {
  auto idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx == 0)
    printf("verify\n");
  if (idx < n)
    assert(v[idx] == idx + 1);
}
/**
 * prefixScan test driver (CUDA build): exercises warp-, block- and
 * multi-block scans, then cross-checks against cub::DeviceScan.
 * Kernel asserts abort the process on failure.
 */
int main() {
  cms::cudatest::requireDevices();

  std::cout << "warp level" << std::endl;
  // std::cout << "warp 32" << std::endl;
  testWarpPrefixScan<int><<<1, 32>>>(32);
  cudaDeviceSynchronize();
  // std::cout << "warp 16" << std::endl;
  testWarpPrefixScan<int><<<1, 32>>>(16);
  cudaDeviceSynchronize();
  // std::cout << "warp 5" << std::endl;
  testWarpPrefixScan<int><<<1, 32>>>(5);
  cudaDeviceSynchronize();

  std::cout << "block level" << std::endl;
  // Sweep every block size and every element count up to the shared limit.
  for (int bs = 32; bs <= 1024; bs += 32) {
    for (int j = 1; j <= 1024; ++j) {
      testPrefixScan<uint16_t><<<1, bs>>>(j);
      cudaDeviceSynchronize();
      testPrefixScan<float><<<1, bs>>>(j);
      cudaDeviceSynchronize();
    }
  }
  cudaDeviceSynchronize();

  int num_items = 200;
  for (int ksize = 1; ksize < 4; ++ksize) {
    // test multiblock
    std::cout << "multiblok" << std::endl;
    // Declare, allocate, and initialize device-accessible pointers for input and output
    num_items *= 10;
    uint32_t *d_in;
    uint32_t *d_out1;
    uint32_t *d_out2;
    cudaCheck(cudaMalloc(&d_in, num_items * sizeof(uint32_t)));
    cudaCheck(cudaMalloc(&d_out1, num_items * sizeof(uint32_t)));
    cudaCheck(cudaMalloc(&d_out2, num_items * sizeof(uint32_t)));

    auto nthreads = 256;
    auto nblocks = (num_items + nthreads - 1) / nthreads;
    init<<<nblocks, nthreads, 0>>>(d_in, 1, num_items);

    // the block counter
    int32_t *d_pc;
    cudaCheck(cudaMalloc(&d_pc, sizeof(int32_t)));
    // BUGFIX: sizeof(int32_t) instead of the magic constant 4.
    cudaCheck(cudaMemset(d_pc, 0, sizeof(int32_t)));

    nthreads = 1024;
    nblocks = (num_items + nthreads - 1) / nthreads;
    multiBlockPrefixScan<<<nblocks, nthreads, 0>>>(d_in, d_out1, num_items, d_pc);
    verify<<<nblocks, nthreads, 0>>>(d_out1, num_items);
    cudaDeviceSynchronize();

    // test cub
    std::cout << "cub" << std::endl;
    // Determine temporary device storage requirements for inclusive prefix sum
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // BUGFIX: check the sizing query's status (was silently ignored).
    CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out2, num_items));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    // Allocate temporary storage for inclusive prefix sum
    // fake larger ws already available
    temp_storage_bytes *= 8;
    cudaCheck(cudaMalloc(&d_temp_storage, temp_storage_bytes));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    // Run inclusive prefix sum
    CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out2, num_items));
    std::cout << "temp storage " << temp_storage_bytes << std::endl;
    verify<<<nblocks, nthreads, 0>>>(d_out2, num_items);
    cudaDeviceSynchronize();

    // BUGFIX: release per-iteration device allocations; they were
    // previously leaked on every pass of the ksize loop.
    cudaCheck(cudaFree(d_temp_storage));
    cudaCheck(cudaFree(d_pc));
    cudaCheck(cudaFree(d_out2));
    cudaCheck(cudaFree(d_out1));
    cudaCheck(cudaFree(d_in));
  }  // ksize
  return 0;
}
|
892504f84ce42a13c917e960382b9480521a8d9a.hip | // !!! This is a file automatically generated by hipify!!!
# include <time.h>
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <hip/hip_runtime.h>
# include <ctime>
#include <hip/hip_runtime.h>
#include "./common/inc/helper_image.h"
float checkGPU(unsigned char * d_result_pixels, int radius, int k);
float cudaPallel(unsigned char * d_result_pixels, int radius);
unsigned int width = 512, height = 512;
// Linear interpolation between two byte samples: a + (b - a) * t.
float lerp(unsigned char a, unsigned char b, float t) {
    float fa = static_cast<float>(a);
    float fb = static_cast<float>(b);
    return fa + (fb - fa) * t;
}
texture<unsigned char, 2, hipReadModeElementType> g_Texture;
// Resamples one axis of the globally bound texture g_Texture into dest
// using linear interpolation; one thread per (w x h) output pixel.
__global__ void Bilinear(unsigned char * dest, float factor, unsigned int w, unsigned int h)
{
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    // Bounds guard: the grid may overshoot the image edges.
    if (tidx < w && tidy < h) {
        // Map the output index back to a (fractional) source coordinate.
        float center = tidx / factor;
        unsigned int start = (unsigned int)center;
        unsigned int stop = start + 1.0f;
        float t = center - start;  // interpolation weight in [0, 1)
        // +0.5f centers the fetch on the texel.
        // NOTE(review): tex2D is sampled with (tidy, start/stop) — x and y
        // look transposed relative to (tidx, tidy); confirm this matches
        // the intended interpolation axis.
        unsigned char a = tex2D(g_Texture, tidy + 0.5f, start + 0.5f);
        unsigned char b = tex2D(g_Texture, tidy + 0.5f, stop + 0.5f);
        float linear = a + (b - a)*t;//lerp(a, b, t);
        dest[tidx + tidy * w] = (int)(linear);
    }
}
// Load a PGM image from disk into *pixels (buffer allocated by the SDK
// helper), writing its dimensions through the out-parameters. Files whose
// name does not end in "pgm" are silently ignored; a failed load aborts.
void loadImage(char *file, unsigned char** pixels, unsigned int * width, unsigned int * height)
{
	const size_t len = strlen(file);
	const bool isPgm = (strcmp(file + (len - 3), "pgm") == 0);
	if (isPgm)
	{
		const bool loaded = sdkLoadPGM<unsigned char>(file, pixels, width, height);
		if (!loaded)
		{
			printf("Failed to load PGM image file: %s\n", file);
			exit(EXIT_FAILURE);
		}
	}
}
// Write a grayscale buffer to disk as PGM via the SDK helper.
// Files whose name does not end in "pgm" are silently skipped.
void saveImage(char *file, unsigned char* pixels, unsigned int width, unsigned int height)
{
	const size_t len = strlen(file);
	if (strcmp(file + (len - 3), "pgm") == 0)
	{
		sdkSavePGM(file, pixels, width, height);
	}
}
// Entry point: loads lena.pgm, binds it to a 2D texture, runs the Bilinear
// kernel once, and saves the result to lena_bisfil.pgm.
int main(int argc, char ** argv)
{
unsigned char * d_result_pixels;
unsigned char * h_result_pixels;
unsigned char * h_pixels = NULL;
unsigned char * d_pixels = NULL;
int factor = 2; // implicitly converted to float at the kernel call
char * src_path = "lena.pgm";
char * d_result_path = "lena_bisfil.pgm";
// loadImage allocates h_pixels and fills the global width/height
loadImage(src_path, &h_pixels, &width, &height);
printf("Image size %dx%d\n", width, height);
int image_size = sizeof(unsigned char) * width * height;
h_result_pixels = (unsigned char *)malloc(image_size);
hipMalloc((void **)& d_pixels, image_size);
hipMalloc((void **)& d_result_pixels, image_size);
hipMemcpy(d_pixels, h_pixels, image_size, hipMemcpyHostToDevice);
// Bind the raw device image to the global texture reference (pitch == row bytes)
hipChannelFormatDesc desc = hipCreateChannelDesc<uchar1>();
hipError_t error = hipBindTexture2D(0, &g_Texture, d_pixels, &desc, width, height, width * sizeof(unsigned char));
if (hipSuccess != error) {
printf("ERROR: Failed to bind texture.\n");
exit(-1);
}
else {
printf("Texture was successfully binded\n");
}
// 16x16 threads per block; grid assumes width/height divisible by 16
int n = 16;
dim3 block(n, n);
dim3 grid(width / n, height / n);
Bilinear << < grid, block >> >(d_result_pixels,factor, width, height);
/*
int N = radius - 1;
int* rs = new int[N];
float* ans1 = new float[N];
for (int i = 1; i < radius; i++) {
rs[i-1] = i;
ans1[i-1] = checkGPU(d_result_pixels,i,100);
std::cout << i << std::endl;
}
std::ofstream out("text1.txt", 'w');
for (int i = 0; i < N; i++) {
out << rs[i] << " ";
}
out << std::endl;
for (int i = 0; i < N; i++) {
out << ans1[i] << " ";
}
out << std::endl;
out.close();
*/
// NOTE(review): blocking memcpy doubles as synchronization for the kernel above
hipMemcpy(h_result_pixels, d_result_pixels, image_size, hipMemcpyDeviceToHost);
saveImage(d_result_path, h_result_pixels, width, height);
hipUnbindTexture(&g_Texture);
hipFree(d_pixels);
hipFree(d_result_pixels);
//delete rs, ans1;
return 0;
}
// Average GPU time, in seconds, over k timed launches with the given radius.
// cudaPallel returns milliseconds per run, hence the 1000.0f divisor.
float checkGPU(unsigned char * d_result_pixels, int radius, int k) {
	float totalMs = 0.0f;
	int run = 0;
	while (run < k) {
		totalMs += cudaPallel(d_result_pixels, radius);
		++run;
	}
	return totalMs / (1000.0f * k); // ms -> s, averaged over k runs
}
// Time one launch of the Bilinear kernel with HIP events, using the global
// image dimensions for the launch geometry. `radius` is forwarded to the
// kernel as its float interpolation factor. Returns elapsed GPU time in ms.
float cudaPallel(unsigned char * d_result_pixels,int radius) {
int n = 16; // 16x16 threads per block; assumes width/height divisible by 16
dim3 block(n, n);
dim3 grid(width / n, height / n);
//---- event setup and start timestamp
hipEvent_t start, stop;
float gpuTime = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//---- the measured work
Bilinear << < grid, block >> >(d_result_pixels, radius, width, height);
//negative_kernel << < grid, block >> >(d_result_pixels, width, height);
//---- stop timestamp; synchronize so the elapsed time is valid
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
//printf("N, time spent executing by the GPU: %.5f ms\n", gpuTime);
hipEventDestroy(start);
hipEventDestroy(stop);
//----
/* CUDA method */
return gpuTime;
}
| 892504f84ce42a13c917e960382b9480521a8d9a.cu | # include <time.h>
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <cuda.h>
# include <ctime>
#include <cuda_runtime.h>
#include "./common/inc/helper_image.h"
float checkGPU(unsigned char * d_result_pixels, int radius, int k);
float cudaPallel(unsigned char * d_result_pixels, int radius);
unsigned int width = 512, height = 512;
// Host-side linear interpolation between two byte samples: yields a at
// t == 0 and b at t == 1. Mirrors the inline math done in the kernel.
float lerp(unsigned char a, unsigned char b, float t) {
	const float span = (float)(b - a);  // unsigned chars promote to int, so the span may be negative
	return a + span * t;
}
texture<unsigned char, 2, cudaReadModeElementType> g_Texture;
// Per-pixel linear interpolation sampling the source image through the bound
// 2D texture g_Texture. Each thread computes one output pixel of a w x h
// image; `factor` is the scale between the output x coordinate and the source
// coordinate. Expects a 2D grid/block launch covering the full image.
__global__ void Bilinear(unsigned char * dest, float factor, unsigned int w, unsigned int h)
{
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
// check that the current indices do not run past the image bounds
if (tidx < w && tidy < h) {
float center = tidx / factor; // fractional source coordinate for this output pixel
unsigned int start = (unsigned int)center;
unsigned int stop = start + 1.0f; // NOTE(review): float literal added to unsigned int -- effectively start + 1
float t = center - start; // fractional part used as the blend weight
unsigned char a = tex2D(g_Texture, tidy + 0.5f, start + 0.5f);
unsigned char b = tex2D(g_Texture, tidy + 0.5f, stop + 0.5f);
float linear = a + (b - a)*t;//lerp(a, b, t);
dest[tidx + tidy * w] = (int)(linear);
}
}
// Load a PGM image from disk into *pixels (buffer allocated by the SDK
// helper), writing its dimensions through the out-parameters. Files whose
// name does not end in "pgm" are silently ignored; a failed load aborts.
void loadImage(char *file, unsigned char** pixels, unsigned int * width, unsigned int * height)
{
	const size_t len = strlen(file);
	const bool isPgm = (strcmp(file + (len - 3), "pgm") == 0);
	if (isPgm)
	{
		const bool loaded = sdkLoadPGM<unsigned char>(file, pixels, width, height);
		if (!loaded)
		{
			printf("Failed to load PGM image file: %s\n", file);
			exit(EXIT_FAILURE);
		}
	}
}
// Write a grayscale buffer to disk as PGM via the SDK helper.
// Files whose name does not end in "pgm" are silently skipped.
void saveImage(char *file, unsigned char* pixels, unsigned int width, unsigned int height)
{
	const size_t len = strlen(file);
	if (strcmp(file + (len - 3), "pgm") == 0)
	{
		sdkSavePGM(file, pixels, width, height);
	}
}
// Entry point: loads lena.pgm, binds it to a 2D texture, runs the Bilinear
// kernel once, and saves the result to lena_bisfil.pgm.
int main(int argc, char ** argv)
{
unsigned char * d_result_pixels;
unsigned char * h_result_pixels;
unsigned char * h_pixels = NULL;
unsigned char * d_pixels = NULL;
int factor = 2; // implicitly converted to float at the kernel call
char * src_path = "lena.pgm";
char * d_result_path = "lena_bisfil.pgm";
// loadImage allocates h_pixels and fills the global width/height
loadImage(src_path, &h_pixels, &width, &height);
printf("Image size %dx%d\n", width, height);
int image_size = sizeof(unsigned char) * width * height;
h_result_pixels = (unsigned char *)malloc(image_size);
cudaMalloc((void **)& d_pixels, image_size);
cudaMalloc((void **)& d_result_pixels, image_size);
cudaMemcpy(d_pixels, h_pixels, image_size, cudaMemcpyHostToDevice);
// Bind the raw device image to the global texture reference (pitch == row bytes)
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar1>();
cudaError_t error = cudaBindTexture2D(0, &g_Texture, d_pixels, &desc, width, height, width * sizeof(unsigned char));
if (cudaSuccess != error) {
printf("ERROR: Failed to bind texture.\n");
exit(-1);
}
else {
printf("Texture was successfully binded\n");
}
// 16x16 threads per block; grid assumes width/height divisible by 16
int n = 16;
dim3 block(n, n);
dim3 grid(width / n, height / n);
Bilinear << < grid, block >> >(d_result_pixels,factor, width, height);
/*
int N = radius - 1;
int* rs = new int[N];
float* ans1 = new float[N];
for (int i = 1; i < radius; i++) {
rs[i-1] = i;
ans1[i-1] = checkGPU(d_result_pixels,i,100);
std::cout << i << std::endl;
}
std::ofstream out("text1.txt", 'w');
for (int i = 0; i < N; i++) {
out << rs[i] << " ";
}
out << std::endl;
for (int i = 0; i < N; i++) {
out << ans1[i] << " ";
}
out << std::endl;
out.close();
*/
// NOTE(review): blocking memcpy doubles as synchronization for the kernel above
cudaMemcpy(h_result_pixels, d_result_pixels, image_size, cudaMemcpyDeviceToHost);
saveImage(d_result_path, h_result_pixels, width, height);
cudaUnbindTexture(&g_Texture);
cudaFree(d_pixels);
cudaFree(d_result_pixels);
//delete rs, ans1;
return 0;
}
// Average GPU time, in seconds, over k timed launches with the given radius.
// cudaPallel returns milliseconds per run, hence the 1000.0f divisor.
float checkGPU(unsigned char * d_result_pixels, int radius, int k) {
	float totalMs = 0.0f;
	int run = 0;
	while (run < k) {
		totalMs += cudaPallel(d_result_pixels, radius);
		++run;
	}
	return totalMs / (1000.0f * k); // ms -> s, averaged over k runs
}
// Time one launch of the Bilinear kernel with CUDA events, using the global
// image dimensions for the launch geometry. `radius` is forwarded to the
// kernel as its float interpolation factor. Returns elapsed GPU time in ms.
float cudaPallel(unsigned char * d_result_pixels,int radius) {
int n = 16; // 16x16 threads per block; assumes width/height divisible by 16
dim3 block(n, n);
dim3 grid(width / n, height / n);
//---- event setup and start timestamp
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//---- the measured work
Bilinear << < grid, block >> >(d_result_pixels, radius, width, height);
//negative_kernel << < grid, block >> >(d_result_pixels, width, height);
//---- stop timestamp; synchronize so the elapsed time is valid
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
//printf("N, time spent executing by the GPU: %.5f ms\n", gpuTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//----
/* CUDA method */
return gpuTime;
}
|
a18287a9bdc189c2438acb978fdaf12973a54fad.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
/**
* @brief Decode a column from a dictionary.
*/
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// Empty dictionary decodes to an empty column.
if (source.size() == 0) return make_empty_column(data_type{type_id::EMPTY});
// View the dictionary's INT32 indices directly as the gather map,
// carrying over the parent column's row offset.
column_view indices{cudf::data_type{cudf::type_id::INT32},
source.size(),
source.indices().head<int32_t>(),
nullptr,
0,
source.offset()}; // no nulls for gather indices
// use gather to create the output column -- use ignore_out_of_bounds=true
auto table_column = cudf::detail::gather(table_view{{source.keys()}},
indices,
cudf::detail::out_of_bounds_policy::IGNORE,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream)
->release();
// Gather was run on a single-column table; take ownership of that column.
auto output_column = std::unique_ptr<column>(std::move(table_column.front()));
// apply any nulls to the output column
output_column->set_null_mask(copy_bitmask(source.parent(), stream, mr), source.null_count());
return output_column;
}
} // namespace detail
// Public API: decode a dictionary column back to its key type, recording an
// NVTX range for profiling. Delegates to detail::decode; the stream argument
// is presumably defaulted in detail::decode's declaration -- not visible here.
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::decode(source, mr);
}
} // namespace dictionary
} // namespace cudf
| a18287a9bdc189c2438acb978fdaf12973a54fad.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
/**
* @brief Decode a column from a dictionary.
*/
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// Empty dictionary decodes to an empty column.
if (source.size() == 0) return make_empty_column(data_type{type_id::EMPTY});
// View the dictionary's INT32 indices directly as the gather map,
// carrying over the parent column's row offset.
column_view indices{cudf::data_type{cudf::type_id::INT32},
source.size(),
source.indices().head<int32_t>(),
nullptr,
0,
source.offset()}; // no nulls for gather indices
// use gather to create the output column -- use ignore_out_of_bounds=true
auto table_column = cudf::detail::gather(table_view{{source.keys()}},
indices,
cudf::detail::out_of_bounds_policy::IGNORE,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream)
->release();
// Gather was run on a single-column table; take ownership of that column.
auto output_column = std::unique_ptr<column>(std::move(table_column.front()));
// apply any nulls to the output column
output_column->set_null_mask(copy_bitmask(source.parent(), stream, mr), source.null_count());
return output_column;
}
} // namespace detail
// Public API: decode a dictionary column back to its key type, recording an
// NVTX range for profiling. Delegates to detail::decode; the stream argument
// is presumably defaulted in detail::decode's declaration -- not visible here.
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::decode(source, mr);
}
} // namespace dictionary
} // namespace cudf
|
fb0a694eb141d39dc238c0a7094b8200da23e3ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <tuple>
#include <limits>
#include <iostream>
#include <chrono>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <hip/hip_runtime.h>
#include <fstream>
#include "kernelBlockedGlobal.hpp"
#define BLOCK_SIZE 32
const auto INF = std::numeric_limits<float>::infinity();
// One blocked Floyd-Warshall relaxation step: for cell (idy, idx) of the
// n x n distance matrix, try every pivot k inside tile `baseBlock` and keep
// the shorter path. INF marks "no edge".
// NOTE(review): the __syncthreads() below sits inside the `idx < n && idy < n`
// branch, so for edge tiles where some threads fail the test the barrier is
// divergent -- confirm launches only use n divisible by BLOCK_SIZE.
__device__
void block_calc_global(float* matrix, const int baseBlock, int n, const int idy, const int idx){
const int index = idy * n + idx;
if(idx < n && idy < n){
float old = matrix[index];
for(int j = 0; j < BLOCK_SIZE; j++){
int k_index = BLOCK_SIZE * baseBlock + j;
int index1 = idy * n + k_index; // dist(idy, k)
int index2 = k_index * n + idx; // dist(k, idx)
if(k_index < n && matrix[index1] != INF && matrix[index2] != INF){
float sum = matrix[index1] + matrix[index2];
if(sum < old){
matrix[index] = old = sum;
}
}
__syncthreads();
}
matrix[index] = old;
}
}
// Phase 1: the diagonal (pivot) tile relaxes itself.
__global__ void global_Phase1(float* matrix, int baseBlock, int n){
const int idx = BLOCK_SIZE * baseBlock + threadIdx.x;
const int idy = BLOCK_SIZE * baseBlock + threadIdx.y;
block_calc_global(matrix, baseBlock, n, idy, idx);
}
// Phase 2: tiles sharing the pivot's row (blockIdx.y == 0) or column.
__global__ void global_Phase2(float* matrix, int baseBlock, int n){
if(blockIdx.x == baseBlock) return; // the pivot tile itself was done in phase 1
int idx = BLOCK_SIZE * baseBlock + threadIdx.x;
int idy = BLOCK_SIZE * baseBlock + threadIdx.y;
if(blockIdx.y == 0) {
idx = BLOCK_SIZE * blockIdx.x + threadIdx.x; // horizontal tiles: vary idx along the pivot row
} else {
idy = BLOCK_SIZE * blockIdx.x + threadIdx.y; // vertical tiles: vary idy along the pivot column
}
block_calc_global(matrix, baseBlock, n, idy, idx);
}
// Phase 3: all remaining tiles (neither in the pivot row nor column).
__global__ void global_Phase3(float* matrix, int baseBlock, int n){
if(blockIdx.x == baseBlock || blockIdx.y == baseBlock) {
return; // already handled by phases 1 and 2
}
const int idx = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int idy = BLOCK_SIZE * blockIdx.y + threadIdx.y;
block_calc_global(matrix, baseBlock, n, idy, idx);
}
namespace kernelBlockedGlobal {
// Runs blocked Floyd-Warshall on the n x n distance matrix (host memory),
// updating it in place. Returns GPU compute time in seconds, measured with
// events around the pivot loop only (host<->device copies excluded).
float kernelBlockedGlobal(float* matrix, int n){
float* matrix_device;
hipEvent_t start, stop;
hipMalloc(&matrix_device, n * n * sizeof(float));
hipMemcpy(matrix_device, matrix, n * n * sizeof(float), hipMemcpyHostToDevice);
int numBlock = ceil(n/float(BLOCK_SIZE)); // tiles per matrix dimension
dim3 Phase1(1,1,1); // just the pivot tile
dim3 Phase2(numBlock, 2, 1); // pivot row + pivot column
dim3 Phase3(numBlock, numBlock, 1); // all tiles; kernel skips pivot row/col
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
hipEventCreate(&start);
hipEventCreate(&stop);
float millis =0;
hipEventRecord(start);
// One pivot tile per iteration; the three phases must run in order.
for(int baseBlock = 0; baseBlock < numBlock; baseBlock++) {
hipLaunchKernelGGL(( global_Phase1), dim3(Phase1), dim3(dimBlock), 0, 0, matrix_device, baseBlock, n);
hipLaunchKernelGGL(( global_Phase2), dim3(Phase2), dim3(dimBlock), 0, 0, matrix_device, baseBlock, n);
hipLaunchKernelGGL(( global_Phase3), dim3(Phase3), dim3(dimBlock), 0, 0, matrix_device, baseBlock, n);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&millis, start, stop);
float timingCompute = millis/1000; // ms -> s
hipMemcpy(matrix, matrix_device, n * n * sizeof(float), hipMemcpyDeviceToHost);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(matrix_device);
return timingCompute;
}
}
| fb0a694eb141d39dc238c0a7094b8200da23e3ef.cu | #include <tuple>
#include <limits>
#include <iostream>
#include <chrono>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <cuda.h>
#include <fstream>
#include "kernelBlockedGlobal.hpp"
#define BLOCK_SIZE 32
const auto INF = std::numeric_limits<float>::infinity();
// One blocked Floyd-Warshall relaxation step: for cell (idy, idx) of the
// n x n distance matrix, try every pivot k inside tile `baseBlock` and keep
// the shorter path. INF marks "no edge".
// NOTE(review): the __syncthreads() below sits inside the `idx < n && idy < n`
// branch, so for edge tiles where some threads fail the test the barrier is
// divergent -- confirm launches only use n divisible by BLOCK_SIZE.
__device__
void block_calc_global(float* matrix, const int baseBlock, int n, const int idy, const int idx){
const int index = idy * n + idx;
if(idx < n && idy < n){
float old = matrix[index];
for(int j = 0; j < BLOCK_SIZE; j++){
int k_index = BLOCK_SIZE * baseBlock + j;
int index1 = idy * n + k_index; // dist(idy, k)
int index2 = k_index * n + idx; // dist(k, idx)
if(k_index < n && matrix[index1] != INF && matrix[index2] != INF){
float sum = matrix[index1] + matrix[index2];
if(sum < old){
matrix[index] = old = sum;
}
}
__syncthreads();
}
matrix[index] = old;
}
}
// Phase 1: the diagonal (pivot) tile relaxes itself.
__global__ void global_Phase1(float* matrix, int baseBlock, int n){
const int idx = BLOCK_SIZE * baseBlock + threadIdx.x;
const int idy = BLOCK_SIZE * baseBlock + threadIdx.y;
block_calc_global(matrix, baseBlock, n, idy, idx);
}
// Phase 2: tiles sharing the pivot's row (blockIdx.y == 0) or column.
__global__ void global_Phase2(float* matrix, int baseBlock, int n){
if(blockIdx.x == baseBlock) return; // the pivot tile itself was done in phase 1
int idx = BLOCK_SIZE * baseBlock + threadIdx.x;
int idy = BLOCK_SIZE * baseBlock + threadIdx.y;
if(blockIdx.y == 0) {
idx = BLOCK_SIZE * blockIdx.x + threadIdx.x; // horizontal tiles: vary idx along the pivot row
} else {
idy = BLOCK_SIZE * blockIdx.x + threadIdx.y; // vertical tiles: vary idy along the pivot column
}
block_calc_global(matrix, baseBlock, n, idy, idx);
}
// Phase 3: all remaining tiles (neither in the pivot row nor column).
__global__ void global_Phase3(float* matrix, int baseBlock, int n){
if(blockIdx.x == baseBlock || blockIdx.y == baseBlock) {
return; // already handled by phases 1 and 2
}
const int idx = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int idy = BLOCK_SIZE * blockIdx.y + threadIdx.y;
block_calc_global(matrix, baseBlock, n, idy, idx);
}
namespace kernelBlockedGlobal {
// Runs blocked Floyd-Warshall on the n x n distance matrix (host memory),
// updating it in place. Returns GPU compute time in seconds, measured with
// events around the pivot loop only (host<->device copies excluded).
float kernelBlockedGlobal(float* matrix, int n){
float* matrix_device;
cudaEvent_t start, stop;
cudaMalloc(&matrix_device, n * n * sizeof(float));
cudaMemcpy(matrix_device, matrix, n * n * sizeof(float), cudaMemcpyHostToDevice);
int numBlock = ceil(n/float(BLOCK_SIZE)); // tiles per matrix dimension
dim3 Phase1(1,1,1); // just the pivot tile
dim3 Phase2(numBlock, 2, 1); // pivot row + pivot column
dim3 Phase3(numBlock, numBlock, 1); // all tiles; kernel skips pivot row/col
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaEventCreate(&start);
cudaEventCreate(&stop);
float millis =0;
cudaEventRecord(start);
// One pivot tile per iteration; the three phases must run in order.
for(int baseBlock = 0; baseBlock < numBlock; baseBlock++) {
global_Phase1<<<Phase1, dimBlock>>>(matrix_device, baseBlock, n);
global_Phase2<<<Phase2, dimBlock>>>(matrix_device, baseBlock, n);
global_Phase3<<<Phase3, dimBlock>>>(matrix_device, baseBlock, n);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millis, start, stop);
float timingCompute = millis/1000; // ms -> s
cudaMemcpy(matrix, matrix_device, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(matrix_device);
return timingCompute;
}
}
|
cf71eb32a2526f67fe6dc480d86978bfcd917047.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "selection_k_radius_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int m = 2;
int k = 1;
float radius = 1;
const int *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
const float *val = NULL;
hipMalloc(&val, XSIZE*YSIZE);
int *idx_out = NULL;
hipMalloc(&idx_out, XSIZE*YSIZE);
float *val_out = NULL;
hipMalloc(&val_out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
selection_k_radius_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,m,k,radius,idx,val,idx_out,val_out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
selection_k_radius_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,m,k,radius,idx,val,idx_out,val_out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
selection_k_radius_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,m,k,radius,idx,val,idx_out,val_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cf71eb32a2526f67fe6dc480d86978bfcd917047.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "selection_k_radius_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each requested matrix size and each of
// the 20 launch configurations, runs selection_k_radius_gpu (one warm-up with
// sync, 10 unmeasured runs, then 1000 timed runs) and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10); // number of matrix sizes to sweep
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int m = 2;
int k = 1;
float radius = 1;
// NOTE(review): allocations below are XSIZE*YSIZE *bytes*, not
// elements * sizeof(type), and are never freed -- generator artifact.
const int *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
const float *val = NULL;
cudaMalloc(&val, XSIZE*YSIZE);
int *idx_out = NULL;
cudaMalloc(&idx_out, XSIZE*YSIZE);
float *val_out = NULL;
cudaMalloc(&val_out, XSIZE*YSIZE);
// Round the grid up so it covers the whole matrix for this block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
selection_k_radius_gpu<<<gridBlock,threadBlock>>>(b,m,k,radius,idx,val,idx_out,val_out);
cudaDeviceSynchronize();
// 10 untimed warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
selection_k_radius_gpu<<<gridBlock,threadBlock>>>(b,m,k,radius,idx,val,idx_out,val_out);
}
// NOTE(review): no sync inside the timed loop -- this measures launch
// submission of 1000 asynchronous kernels, not execution time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
selection_k_radius_gpu<<<gridBlock,threadBlock>>>(b,m,k,radius,idx,val,idx_out,val_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
e46db6615b1dd70635cae476ea2c67895c721b06.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
// GPU forward pass: top = |bottom| elementwise over all `count` values.
// Dispatches to the CUDA (hipified) math path or the GreenTea/OpenCL path
// depending on this layer's device-context backend.
template<typename Dtype>
void AbsValLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_abs<Dtype>(this->device_context_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (top_data), 0);
#endif // USE_GREENTEA
}
}
// GPU backward pass: bottom_diff = sign(bottom_data) * top_diff
// (the derivative of |x| is sign(x)). Skipped entirely when gradient
// propagation to the bottom blob is disabled.
template<typename Dtype>
void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// bottom_diff holds sign(bottom_data), then is scaled in place by top_diff
caffe_gpu_sign(count, bottom_data, bottom_diff);
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_sign<Dtype>(this->device_context_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0);
greentea_gpu_mul<Dtype>(this->device_context_->id(), count,
(cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer);
} // namespace caffe
| e46db6615b1dd70635cae476ea2c67895c721b06.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
// GPU forward pass: top = |bottom| elementwise over all `count` values.
// Dispatches to the CUDA math path or the GreenTea/OpenCL path depending on
// this layer's device-context backend.
template<typename Dtype>
void AbsValLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_abs<Dtype>(this->device_context_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (top_data), 0);
#endif // USE_GREENTEA
}
}
// GPU backward pass: bottom_diff = sign(bottom_data) * top_diff
// (the derivative of |x| is sign(x)). Skipped entirely when gradient
// propagation to the bottom blob is disabled.
template<typename Dtype>
void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// bottom_diff holds sign(bottom_data), then is scaled in place by top_diff
caffe_gpu_sign(count, bottom_data, bottom_diff);
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_sign<Dtype>(this->device_context_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0);
greentea_gpu_mul<Dtype>(this->device_context_->id(), count,
(cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer);
} // namespace caffe
|
282c049c88ac0a87696f5b2353105888a7a06626.hip | // !!! This is a file automatically generated by hipify!!!
/*
3D Cellular Automata Simulation,
this Ignores cuda, runs simulation on CPU
- Sparsh
opengl code referenced from : https://docs.nvidia.com/cuda/cuda-samples/index.html#simple-opengl
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <helper_gl.h>
#include <GL/freeglut.h>
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <hip/hip_vector_types.h>
#define MAX_EPSILON_ERROR 10.0f
#define THRESHOLD 0.30f
#define REFRESH_DELAY 10 //ms
////////////////////////////////////////////////////////////////////////////////
// constants
const unsigned int window_width = 1024;
const unsigned int window_height = 1024;
const unsigned int mesh_width = 32;
const unsigned int mesh_height = 32;
const unsigned int mesh_length = 32;
// vbo variables
GLuint vbo;
struct cudaGraphicsResource* cuda_vbo_resource;
void* d_vbo_buffer = NULL;
GLuint loc_vbo;
struct cudaGraphicsResource* cuda_loc_vbo_resource;
void* d_loc_vbo_buffer = NULL;
int* state;
int* state_next;
int* d_state;
int* d_state_next;
float g_fAnim = 0.0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -3.0;
StopWatchInterface* timer = NULL;
// Auto-Verification Code
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
int g_Index = 0;
float avgFPS = 0.0f;
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
bool g_bQAReadback = false;
int* pArgc = NULL;
char** pArgv = NULL;
#define MAX(a,b) ((a > b) ? a : b)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char** argv, char* ref_file);
void cleanup();
// GL functionality
bool initGL(int* argc, char** argv);
void createVBO(GLuint* vbo, struct cudaGraphicsResource** vbo_res,
unsigned int vbo_res_flags);
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res);
// rendering callbacks
void display();
void keyboard(unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void timerEvent(int value);
//misc
void createVBOseqential(GLuint* vbo);
void runSequential(GLuint* vbo_resource, int* state, int* state_next);
// Cuda functionality
void runCuda(struct cudaGraphicsResource** vbo_resource, int* state, int* state_next, int* d_state, int* d_state_next);
void runAutoTest(int devID, char** argv, char* ref_file);
void checkResultCuda(int argc, char** argv, const GLuint& vbo);
const char* sSDKsample = "simpleGL (VBO)";
/*
starting config of the state, input is array of coords you want to spawn live cell
*/
/*
 * Seed the cellular-automaton state buffers.
 * When coordinate arrays are supplied, each (x[i], y[i], z[i]) cell is set
 * alive (buffers are not cleared first). When x is NULL, both generations
 * are zeroed and three cells on the cube's main diagonal around the centre
 * are set alive as a default starting pattern.
 */
void initializeStartState(int x[], int y[], int z[], int count) {
	// casual fallback on null input, TODO refactor to remove this
	if (x == NULL) {
		// Clear every cell of the current and next generation.
		for (int cx = 0; cx < mesh_width; cx++) {
			for (int cy = 0; cy < mesh_width; cy++) {
				for (int cz = 0; cz < mesh_width; cz++) {
					const int cell = (mesh_width * (cy * mesh_width + cx)) + cz;
					state[cell] = 0;
					state_next[cell] = 0;
				}
			}
		}
		// Light the main diagonal at centre-1, centre, centre+1.
		const int mid = mesh_width / 2;
		for (int d = -1; d <= 1; d++) {
			const int c = mid + d;
			state[(mesh_width * (c * mesh_width + c)) + c] = 1;
		}
		return;
	}
	// Explicit seed list: mark each requested cell alive.
	for (int i = 0; i < count; i++) {
		state[(mesh_width * (y[i] * mesh_width + x[i])) + z[i]] = 1;
	}
}
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
// One CA evolution step + vertex generation. Each thread owns one cell of
// the width x height x length grid (3D launch, no bounds guard -- the grid
// must cover the mesh exactly): it counts live 26-neighbours in `state`,
// writes the next generation into `state_next`, and emits the cell's vertex
// into `pos` (live spawns get their normalized position; others get (1,1,1)).
__global__ void simple_vbo_kernel(float4* pos, unsigned int length, unsigned int width, unsigned int height, float time, int* state, int* state_next)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
int my_offset = (width * (y * width + x)) + z;
//printf("\nI am %d, %d, %d as %d", x,y,z,my_offset);
// calculate uvw coordinates, mapped from [0,1) into [-1,1)
float u = x / (float)width;
float v = y / (float)height;
float w = z / (float)length;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
w = w * 2.0f - 1.0f;
//evolution rules
int count_live = 0, count_dead = 0;
// 3x3x3 neighbourhood offsets, including (0,0,0) which is skipped below
//thank you ms excel
int dx[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
int dy[] = {-1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1};
int dz[] = {-1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1};
for (int i = 0; i < 27; i++) {
// NOTE(review): x is unsigned, so x + dx[i] wraps at 0 before the int
// conversion; the MIN(...) < 0 test relies on that wrap becoming -1.
int nx = x + dx[i], ny = y + dy[i], nz = z + dz[i];
if (MAX(MAX(nx, ny), nz) >= width || MIN(MIN(nx, ny), nz) < 0) {
continue;
}
if (nx == x && ny == y && nz == z) {
continue;
}
int offset = (width * (ny * width + nx)) + nz;
if (state[offset] == 1) {
count_live++;
//printf("\nvisiting xyz = %d %d %d neighbor = %d %d %d found alive cell alive count is %d", x, y, z, nx, ny, nz, count_live);
}else count_dead++;
}
//basic test rule, spawn new if neighborhood has 2 alive and im empty
// & write output vertex
if (state[my_offset] == 0 && count_live == 2) {
//printf("\noffset in cuda: %d, xyz %d %d %d, uvw %f %f %f", my_offset, x, y, z, u, v, w);
state_next[my_offset] = 1;
pos[my_offset] = make_float4(u, v, w, 1.0f);
}
else if (state[my_offset] == 1 && count_live >= 6) {
// overcrowding: the cell dies
state_next[my_offset] = 0;
pos[my_offset] = make_float4(1, 1, 1, 1.0f);
}
else {
//printf("\nelse offset in cuda: %d, xyz %d %d %d, uvw %f %f %f", my_offset, x, y, z, u, v, w);
state_next[my_offset] = state[my_offset];
pos[my_offset] = make_float4(1, 1, 1, 1.0f);
}
}
// Host-side wrapper: launches one thread per cell of the (cubic) mesh using
// 8x8x8 thread blocks. Assumes the mesh dimensions are multiples of 8.
void launch_kernel(float4* pos, unsigned int mesh_width,
    unsigned int mesh_height, float time, int* d_state, int* d_state_next)
{
    const unsigned int threadsPerDim = 8;
    dim3 threads(threadsPerDim, threadsPerDim, threadsPerDim);
    dim3 blocks(mesh_width / threads.x, mesh_width / threads.y, mesh_height / threads.z);
    simple_vbo_kernel << < blocks, threads >> > (pos, mesh_width, mesh_width, mesh_height, time, d_state, d_state_next);
}
// Returns true when device `dev`'s name starts with `gpuType`
// (case-insensitive). Copies the full device name into `name`
// (caller must supply a buffer large enough for the device name string).
bool checkHW(char* name, const char* gpuType, int dev)
{
    hipDeviceProp_t deviceProp;
    // Surface query failures instead of comparing against garbage.
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
    strcpy(name, deviceProp.name);
    // STRNCASECMP returns 0 on a prefix match (strncasecmp semantics).
    return STRNCASECMP(deviceProp.name, gpuType, strlen(gpuType)) == 0;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: parses the optional "-file=<ref>" flag (selects the
// non-OpenGL auto-test/compare mode) and hands control to runTest().
int main(int argc, char** argv)
{
char* ref_file = NULL;
// Stash argc/argv globally for helpers used later (findCudaDevice, runAutoTest).
pArgc = &argc;
pArgv = argv;
#if defined(__linux__)
// Ensure a display target exists so GLUT can open a window (e.g. over ssh).
setenv("DISPLAY", ":0", 0);
#endif
printf("%s starting...@main1 !!!\n", sSDKsample);
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char**)argv, "file"))
{
// In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly
getCmdLineArgumentString(argc, (const char**)argv, "file", (char**)&ref_file);
}
}
printf("\n");
runTest(argc, argv, ref_file);
printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!");
// Exit status reflects the error count accumulated by the auto-test path.
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Updates the rolling FPS average every `fpsLimit` frames and pushes it into
// the window title. Called once per frame from display().
void computeFPS()
{
    frameCount++;
    fpsCount++;
    if (fpsCount == fpsLimit)
    {
        avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
        fpsCount = 0;
        // Re-sample roughly once per second of rendering (at least every frame).
        fpsLimit = (int)MAX(avgFPS, 1.f);
        sdkResetTimer(&timer);
    }
    char fps[256];
    // snprintf guards the fixed-size title buffer against overflow (sprintf did not).
    snprintf(fps, sizeof(fps), "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS);
    glutSetWindowTitle(fps);
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
// Creates the GLUT window and sets up fixed-function GL state (clear color,
// viewport, perspective projection). Returns false when the required GL 2.0
// extension support is missing.
bool initGL(int* argc, char** argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Cuda GL Interop (VBO)");
// Minimal callback set; runTest() registers the full set (mouse, close, ...).
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMotionFunc(motion);
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
// initialize necessary OpenGL extensions
if (!isGLVersionSupported(2, 0))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return false;
}
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, window_width, window_height);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)window_width / (GLfloat)window_height, 0.1, 10.0);
SDK_CHECK_ERROR_GL();
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Driver for both execution modes.
// - ref_file != NULL: headless auto-test (render into a plain device buffer,
//   compare against the reference dump).
// - otherwise: interactive mode — init GL, allocate the host automaton grids,
//   seed them, run one warm-up CPU step, then enter the GLUT main loop
//   (display() advances the simulation each frame).
bool runTest(int argc, char** argv, char* ref_file)
{
    // Create the CUTIL timer used for FPS accounting in display().
    sdkCreateTimer(&timer);
    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    int devID = findCudaDevice(argc, (const char**)argv);
    // command line mode only
    if (ref_file != NULL)
    {
        // create VBO-sized scratch buffer on the device
        checkCudaErrors(hipMalloc((void**)&d_vbo_buffer, mesh_width * mesh_height * 4 * sizeof(float)));
        // run the cuda part
        runAutoTest(devID, argv, ref_file);
        // check result of Cuda step
        checkResultCuda(argc, argv, vbo);
        hipFree(d_vbo_buffer);
        d_vbo_buffer = NULL;
    }
    else
    {
        // First initialize OpenGL context, so we can properly set the GL for CUDA.
        // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
        if (false == initGL(&argc, argv))
        {
            return false;
        }
        // register callbacks
        glutDisplayFunc(display);
        glutKeyboardFunc(keyboard);
        glutMouseFunc(mouse);
        glutMotionFunc(motion);
        glutCloseFunc(cleanup);
        // create VBO (plain GL buffer; the sequential path needs no CUDA interop)
        createVBOseqential(&loc_vbo);
        // Allocate the automaton state grids on the host; fail cleanly on OOM
        // instead of crashing inside initializeStartState().
        state = (int*)malloc(mesh_width * mesh_width * mesh_height * sizeof(int));
        state_next = (int*)malloc(mesh_width * mesh_width * mesh_height * sizeof(int));
        if (state == NULL || state_next == NULL)
        {
            fprintf(stderr, "runTest: failed to allocate host state grids\n");
            free(state);
            free(state_next);
            state = NULL;
            state_next = NULL;
            return false;
        }
        // Seed the automaton (NULL input selects the built-in starting pattern).
        initializeStartState(NULL, NULL, NULL, NULL);
        // Sequential warm-up iteration; comment out runCuda when you use this.
        runSequential(&loc_vbo, state, state_next);
        // Promote the evolved generation to the current one.
        memcpy(state, state_next, sizeof(int) * mesh_width * mesh_width * mesh_width);
        // start rendering mainloop
        glutMainLoop();
    }
    return true;
}
// CPU reference implementation of one automaton generation. Mirrors
// simple_vbo_kernel: reads `state`, writes `state_next`, and uploads one
// float4 vertex per cell into the bound VBO (loc_vbo).
// BUG FIX: the original passed GL_DYNAMIC_DRAW (0x88E8) as glBufferSubData's
// third argument, which is the byte SIZE of the update, not a usage hint —
// each call tried to write ~35 KB instead of 16 bytes, corrupting/rejecting
// uploads (the old TODO about half the world rendering wrong).
// Note: vbo_resource is unused (kept for signature parity with runCuda);
// the global loc_vbo is bound directly, as before.
void runSequential(GLuint* vbo_resource, int* state, int* state_next) {
    // Staging buffer for one vertex (x, y, z, w) before upload.
    float vertex[4];
    // Offsets of the 3x3x3 neighborhood, hoisted out of the triple loop;
    // the center cell is skipped inside the neighbor scan.
    const int dx[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    const int dy[] = { -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1 };
    const int dz[] = { -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1 };
    glBindBuffer(GL_ARRAY_BUFFER, loc_vbo);
    for (int x = 0; x < mesh_width; x++) {
        for (int y = 0; y < mesh_width; y++) {
            for (int z = 0; z < mesh_width; z++) {
                int my_offset = (mesh_width * (y * mesh_width + x)) + z;
                // Normalized [-1, 1] coordinates for this cell's vertex.
                float u = x / (float)mesh_width * 2.0f - 1.0f;
                float v = y / (float)mesh_height * 2.0f - 1.0f;
                float w = z / (float)mesh_width * 2.0f - 1.0f;
                // Count live neighbors, rejecting out-of-range coordinates.
                int count_live = 0;
                for (int i = 0; i < 27; i++) {
                    int nx = x + dx[i], ny = y + dy[i], nz = z + dz[i];
                    if (MAX(MAX(nx, ny), nz) >= mesh_width || MIN(MIN(nx, ny), nz) < 0) {
                        continue;
                    }
                    if (nx == x && ny == y && nz == z) {
                        continue;
                    }
                    int offset = (mesh_width * (ny * mesh_width + nx)) + nz;
                    if (state[offset] == 1) {
                        count_live++;
                    }
                }
                // Rules (identical to the kernel): birth on exactly 2 live
                // neighbors, death on 6+, otherwise unchanged. Dead cells get
                // the off-screen sentinel vertex (1,1,1,1).
                if (state[my_offset] == 0 && count_live == 2) {
                    state_next[my_offset] = 1;
                    vertex[0] = u; vertex[1] = v; vertex[2] = w; vertex[3] = 1.0f;
                }
                else if (state[my_offset] == 1 && count_live >= 6) {
                    state_next[my_offset] = 0;
                    vertex[0] = 1; vertex[1] = 1; vertex[2] = 1; vertex[3] = 1.0f;
                }
                else {
                    state_next[my_offset] = state[my_offset];
                    vertex[0] = 1; vertex[1] = 1; vertex[2] = 1; vertex[3] = 1.0f;
                }
                // 16 bytes per cell (float4), one update per cell. The
                // per-cell glVertexPointer/client-state toggling the original
                // did is unnecessary for buffer updates and has been removed;
                // display() sets its own vertex pointer before drawing.
                glBufferSubData(GL_ARRAY_BUFFER, my_offset * 4 * sizeof(float), 4 * sizeof(float), vertex);
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
// GPU evolution step: maps the interop VBO, copies the host state to the
// device, launches simple_vbo_kernel (one thread per cell), then copies the
// evolved generation back into `state` and unmaps the VBO.
// NOTE(review): d_state/d_state_next are passed by value, so the per-call
// hipMalloc/hipFree below never escapes this function; allocating once and
// reusing across frames would avoid the per-frame allocation cost.
void runCuda(struct cudaGraphicsResource** vbo_resource, int* state, int* state_next, int* d_state, int* d_state_next)
{
    // map OpenGL buffer object for writing from CUDA
    float4* dptr;
    checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0));
    size_t num_bytes;
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes,
        *vbo_resource));
    const size_t stateBytes = (size_t)mesh_width * mesh_width * mesh_height * sizeof(int);
    // allocate device memory for both generations
    checkCudaErrors(hipMalloc((void**)&d_state, stateBytes));
    checkCudaErrors(hipMalloc((void**)&d_state_next, stateBytes));
    // upload the current generation
    checkCudaErrors(hipMemcpy(d_state, state, stateBytes, hipMemcpyHostToDevice));
    /* CUDA KERNEL CALL */
    int thread1D = 8; //MAX 64 for this GTX 1660Ti
    dim3 block(thread1D, thread1D, thread1D);
    dim3 grid(mesh_width / block.x, mesh_width / block.y, mesh_height / block.z);
    hipLaunchKernelGGL(( simple_vbo_kernel) , dim3(grid), dim3(block) , 0, 0, dptr, mesh_width, mesh_width, mesh_height, g_fAnim, d_state, d_state_next);
    // Surface launch-configuration errors immediately rather than at the next
    // synchronizing call (matches the pattern used in runAutoTest).
    getLastCudaError("simple_vbo_kernel launch failed");
    // Copy the evolved generation back; this blocking memcpy also synchronizes
    // with the kernel.
    checkCudaErrors(hipMemcpy(state, d_state_next, stateBytes, hipMemcpyDeviceToHost));
    // flush device memory
    checkCudaErrors(hipFree(d_state)); checkCudaErrors(hipFree(d_state_next));
    // unmap buffer object
    checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0));
}
#ifdef _WIN32
#ifndef FOPEN
// Windows: use the secure fopen_s variant (MSVC deprecates plain fopen).
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
// POSIX: plain fopen; the macro yields the FILE* by assigning into fHandle.
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif
// Dumps `bytes` bytes of `data` to `filename` as a raw binary file.
// Used by runAutoTest to produce the comparison dump.
void sdkDumpBin2(void* data, unsigned int bytes, const char* filename)
{
    printf("sdkDumpBin: <%s>\n", filename);
    FILE* fp;
    FOPEN(fp, filename, "wb");
    // FIX: fp was used unchecked — a read-only directory or bad path crashed here.
    if (fp == NULL)
    {
        fprintf(stderr, "sdkDumpBin2: failed to open <%s> for writing\n", filename);
        return;
    }
    fwrite(data, bytes, 1, fp);
    fflush(fp);
    fclose(fp);
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
// Headless auto-test: reads the rendered buffer back from the device, dumps
// it to simpleGL.bin, and compares against the reference file (incrementing
// g_TotalErrors on mismatch or failure).
void runAutoTest(int devID, char** argv, char* ref_file)
{
    char* reference_file = NULL;
    void* imageData = malloc(mesh_width * mesh_height * sizeof(float));
    if (imageData == NULL)
    {
        fprintf(stderr, "runAutoTest: failed to allocate host buffer\n");
        g_TotalErrors++;
        return;
    }
    // execute the kernel
    //launch_kernel((float4*)d_vbo_buffer, mesh_width, mesh_height, g_fAnim);
    hipDeviceSynchronize();
    getLastCudaError("launch_kernel failed");
    checkCudaErrors(hipMemcpy(imageData, d_vbo_buffer, mesh_width * mesh_height * sizeof(float), hipMemcpyDeviceToHost));
    sdkDumpBin2(imageData, mesh_width * mesh_height * sizeof(float), "simpleGL.bin");
    reference_file = sdkFindFilePath(ref_file, argv[0]);
    if (reference_file &&
        !sdkCompareBin2BinFloat("simpleGL.bin", reference_file,
            mesh_width * mesh_height * sizeof(float),
            MAX_EPSILON_ERROR, THRESHOLD, pArgv[0]))
    {
        g_TotalErrors++;
    }
    // FIX: imageData was leaked on every invocation.
    free(imageData);
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
// Allocates a GL buffer holding one float4 vertex per grid cell and registers
// it with CUDA so kernels can write into it via graphics interop.
void createVBO(GLuint* vbo, struct cudaGraphicsResource** vbo_res,
    unsigned int vbo_res_flags)
{
    assert(vbo);
    // One float4 (16 bytes) per cell of the width x width x height grid.
    const unsigned int bufferBytes = mesh_width * mesh_width * mesh_height * 4 * sizeof(float);
    glGenBuffers(1, vbo);
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glBufferData(GL_ARRAY_BUFFER, bufferBytes, 0, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    // Hand the buffer to CUDA for interop writes.
    checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
    SDK_CHECK_ERROR_GL();
}
/*
for sequential create VBO
*/
// GL-only variant of createVBO: same buffer layout (one float4 per cell) but
// no CUDA registration; used by the sequential (CPU) simulation path.
void createVBOseqential(GLuint* vbo) {
    assert(vbo);
    const unsigned int bufferBytes = mesh_width * mesh_width * mesh_height * 4 * sizeof(float);
    glGenBuffers(1, vbo);
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glBufferData(GL_ARRAY_BUFFER, bufferBytes, 0, GL_DYNAMIC_DRAW);// do it here.
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    SDK_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
// Unregisters the buffer from CUDA, deletes the GL object, and zeroes the handle.
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res)
{
    // unregister this buffer object with CUDA before the GL object goes away
    checkCudaErrors(hipGraphicsUnregisterResource(vbo_res));
    // BUG FIX: glBindBuffer's first argument is a target enum; the original
    // passed the literal 1, which is not a valid target (GL_INVALID_ENUM).
    // Unbind from GL_ARRAY_BUFFER, then delete the buffer object.
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDeleteBuffers(1, vbo);
    *vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
// Per-frame GLUT display callback: advances the automaton one generation on
// the CPU (which uploads changed vertices into loc_vbo), then draws every
// cell as a point through the fixed-function pipeline.
void display()
{
sdkStartTimer(&timer);
//sequential state update
runSequential(&loc_vbo, state, state_next);
// Promote the evolved generation to the current one for the next frame.
memcpy(state, state_next, sizeof(int) * mesh_width * mesh_width * mesh_width);
//end of sequential block
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix (camera transform from the mouse-driven globals)
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo: 4 floats per vertex, tightly packed
glBindBuffer(GL_ARRAY_BUFFER, loc_vbo);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(0.0, 1.0, 0.4);
glPointSize(1.0f);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_width * mesh_height); //added third multiplier
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
g_fAnim += 0.01f;
sdkStopTimer(&timer);
computeFPS();
}
// Self-rearming GLUT timer: requests a redraw every REFRESH_DELAY ms.
// Stops rearming (ending the chain) once no window remains.
void timerEvent(int value)
{
    if (!glutGetWindow())
    {
        return;
    }
    glutPostRedisplay();
    glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
}
// GLUT close callback: releases the timer, both VBOs (if created), and the
// host-side automaton grids.
void cleanup()
{
    sdkDeleteTimer(&timer);
    if (vbo)
    {
        deleteVBO(&vbo, cuda_vbo_resource);
    }
    if (loc_vbo)
    {
        deleteVBO(&loc_vbo, cuda_loc_vbo_resource);
    }
    free(state);
    free(state_next);
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
// Keyboard handler: ESC (27) closes the window; all other keys are ignored.
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
    if (key == 27)
    {
        glutDestroyWindow(glutGetWindow());
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
// Mouse button handler: tracks pressed buttons as a bitmask (any release
// clears the whole mask) and records the cursor position for motion().
void mouse(int button, int state, int x, int y)
{
    switch (state)
    {
    case GLUT_DOWN:
        mouse_buttons |= 1 << button;
        break;
    case GLUT_UP:
        mouse_buttons = 0;
        break;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
// Mouse drag handler: left button (bit 0) rotates the scene, right button
// (bit 2) translates along z. Updates the remembered cursor position.
void motion(int x, int y)
{
    const float deltaX = (float)(x - mouse_old_x);
    const float deltaY = (float)(y - mouse_old_y);
    if (mouse_buttons & 1)
    {
        rotate_x += deltaY * 0.2f;
        rotate_y += deltaX * 0.2f;
    }
    else if (mouse_buttons & 4)
    {
        translate_z += deltaY * 0.01f;
    }
    mouse_old_x = x;
    mouse_old_y = y;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if the result is correct or write data to file for external
//! regression testing
////////////////////////////////////////////////////////////////////////////////
// Validates the CUDA-generated VBO (interop path only): temporarily
// unregisters the buffer from CUDA, maps it in GL, optionally writes a
// regression dump, then unmaps and re-registers it for further CUDA writes.
void checkResultCuda(int argc, char** argv, const GLuint& vbo)
{
// Only applicable when rendering went through the interop VBO
// (d_vbo_buffer is the non-GL fallback buffer).
if (!d_vbo_buffer)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
// map buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo);
float* data = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
// NOTE(review): data may be NULL if the map fails — verify before use.
// check result
if (checkCmdLineFlag(argc, (const char**)argv, "regression"))
{
// write file for regression test
sdkWriteFile<float>("./data/regression.dat",
data, mesh_width * mesh_height * 3, 0.0, false);
}
// unmap GL buffer object
if (!glUnmapBuffer(GL_ARRAY_BUFFER))
{
fprintf(stderr, "Unmap buffer failed.\n");
fflush(stderr);
}
// Re-register so CUDA can keep writing into the VBO on later frames.
checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo,
hipGraphicsMapFlagsWriteDiscard));
SDK_CHECK_ERROR_GL();
}
}
| 282c049c88ac0a87696f5b2353105888a7a06626.cu | /*
3D Cellular Automata Simulation,
this Ignores cuda, runs simulation on CPU
- Sparsh
opengl code referenced from : https://docs.nvidia.com/cuda/cuda-samples/index.html#simple-opengl
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <helper_gl.h>
#include <GL/freeglut.h>
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <vector_types.h>
#define MAX_EPSILON_ERROR 10.0f
#define THRESHOLD 0.30f
#define REFRESH_DELAY 10 //ms
////////////////////////////////////////////////////////////////////////////////
// constants
const unsigned int window_width = 1024;
const unsigned int window_height = 1024;
const unsigned int mesh_width = 32;
const unsigned int mesh_height = 32;
const unsigned int mesh_length = 32;
// vbo variables
GLuint vbo;
struct cudaGraphicsResource* cuda_vbo_resource;
void* d_vbo_buffer = NULL;
GLuint loc_vbo;
struct cudaGraphicsResource* cuda_loc_vbo_resource;
void* d_loc_vbo_buffer = NULL;
int* state;
int* state_next;
int* d_state;
int* d_state_next;
float g_fAnim = 0.0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -3.0;
StopWatchInterface* timer = NULL;
// Auto-Verification Code
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
int g_Index = 0;
float avgFPS = 0.0f;
unsigned int frameCount = 0;
unsigned int g_TotalErrors = 0;
bool g_bQAReadback = false;
int* pArgc = NULL;
char** pArgv = NULL;
#define MAX(a,b) ((a > b) ? a : b)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char** argv, char* ref_file);
void cleanup();
// GL functionality
bool initGL(int* argc, char** argv);
void createVBO(GLuint* vbo, struct cudaGraphicsResource** vbo_res,
unsigned int vbo_res_flags);
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res);
// rendering callbacks
void display();
void keyboard(unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
void timerEvent(int value);
//misc
void createVBOseqential(GLuint* vbo);
void runSequential(GLuint* vbo_resource, int* state, int* state_next);
// Cuda functionality
void runCuda(struct cudaGraphicsResource** vbo_resource, int* state, int* state_next, int* d_state, int* d_state_next);
void runAutoTest(int devID, char** argv, char* ref_file);
void checkResultCuda(int argc, char** argv, const GLuint& vbo);
const char* sSDKsample = "simpleGL (VBO)";
/*
starting config of the state, input is array of coords you want to spawn live cell
*/
void initializeStartState(int x[], int y[], int z[], int count) {
// casual fallback on null input, TODO refactor to remove this
if (x == NULL) {
for(int x = 0; x < mesh_width; x++)
for (int y = 0; y < mesh_width; y++)
for (int z = 0; z < mesh_width; z++)
{
//printf("\n%d %d %d", x, y, z);
int loc = (mesh_width * (y * mesh_width + x)) + z;
state[loc] = 0;
state_next[loc] = 0;
}
int mid = mesh_width / 2;
int tx, ty, tz;
tx = mid;
ty = mid;
tz = mid;
int loc = (mesh_width * (ty * mesh_width + tx)) + tz;
//printf("\nstate's address = %d", state);
state[loc] = 1;
//printf("\ninitialized at offset = %d", loc);
tx = mid + 1;
ty = mid + 1;
tz = mid + 1;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;
//printf("\ninitialized at offset = %d", loc);
tx = mid - 1;
ty = mid - 1;
tz = mid - 1;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;
//printf("\ninitialized at offset = %d", loc);
/*tx = mid + 1;
ty = mid;
tz = mid + 1;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;*/
/*tx = mid;
ty = mid + 1;
tz = mid;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;*/
/*tx = mid + 8;
ty = mid - 8;
tz = mid - 8;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;
tx = mid + 8;
ty = mid - 8;
tz = mid - 7;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;
tx = mid + 8;
ty = mid - 7;
tz = mid - 8;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
state[loc] = 1;
*/
return;
}
for (int i = 0; i < count; i++) {
int loc = (mesh_width * (y[i] * mesh_width + x[i])) + z[i];
state[loc] = 1;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void simple_vbo_kernel(float4* pos, unsigned int length, unsigned int width, unsigned int height, float time, int* state, int* state_next)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
int my_offset = (width * (y * width + x)) + z;
//printf("\nI am %d, %d, %d as %d", x,y,z,my_offset);
// calculate uvw coordinates
float u = x / (float)width;
float v = y / (float)height;
float w = z / (float)length;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
w = w * 2.0f - 1.0f;
//evolution rules
int count_live = 0, count_dead = 0;
//thank you ms excel
int dx[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
int dy[] = {-1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1};
int dz[] = {-1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1};
for (int i = 0; i < 27; i++) {
int nx = x + dx[i], ny = y + dy[i], nz = z + dz[i];
if (MAX(MAX(nx, ny), nz) >= width || MIN(MIN(nx, ny), nz) < 0) {
continue;
}
if (nx == x && ny == y && nz == z) {
continue;
}
int offset = (width * (ny * width + nx)) + nz;
if (state[offset] == 1) {
count_live++;
//printf("\nvisiting xyz = %d %d %d neighbor = %d %d %d found alive cell alive count is %d", x, y, z, nx, ny, nz, count_live);
}else count_dead++;
}
//basic test rule, spawn new if neighborhood has 2 alive and im empty
// & write output vertex
if (state[my_offset] == 0 && count_live == 2) {
//printf("\noffset in cuda: %d, xyz %d %d %d, uvw %f %f %f", my_offset, x, y, z, u, v, w);
state_next[my_offset] = 1;
pos[my_offset] = make_float4(u, v, w, 1.0f);
}
else if (state[my_offset] == 1 && count_live >= 6) {
state_next[my_offset] = 0;
pos[my_offset] = make_float4(1, 1, 1, 1.0f);
}
else {
//printf("\nelse offset in cuda: %d, xyz %d %d %d, uvw %f %f %f", my_offset, x, y, z, u, v, w);
state_next[my_offset] = state[my_offset];
pos[my_offset] = make_float4(1, 1, 1, 1.0f);
}
}
void launch_kernel(float4* pos, unsigned int mesh_width,
unsigned int mesh_height, float time, int* d_state, int* d_state_next)
{
// execute the kernel
dim3 block(8, 8, 8);
dim3 grid(mesh_width / block.x, mesh_width / block.y, mesh_height / block.z);
simple_vbo_kernel << < grid, block >> > (pos, mesh_width, mesh_width, mesh_height, time, d_state, d_state_next);
}
bool checkHW(char* name, const char* gpuType, int dev)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
strcpy(name, deviceProp.name);
if (!STRNCASECMP(deviceProp.name, gpuType, strlen(gpuType)))
{
return true;
}
else
{
return false;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
char* ref_file = NULL;
pArgc = &argc;
pArgv = argv;
#if defined(__linux__)
setenv("DISPLAY", ":0", 0);
#endif
printf("%s starting...@main1 !!!\n", sSDKsample);
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char**)argv, "file"))
{
// In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly
getCmdLineArgumentString(argc, (const char**)argv, "file", (char**)&ref_file);
}
}
printf("\n");
runTest(argc, argv, ref_file);
printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!");
exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
fpsCount = 0;
fpsLimit = (int)MAX(avgFPS, 1.f);
sdkResetTimer(&timer);
}
char fps[256];
sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS);
glutSetWindowTitle(fps);
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
bool initGL(int* argc, char** argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Cuda GL Interop (VBO)");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMotionFunc(motion);
glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
// initialize necessary OpenGL extensions
if (!isGLVersionSupported(2, 0))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
return false;
}
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, window_width, window_height);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)window_width / (GLfloat)window_height, 0.1, 10.0);
SDK_CHECK_ERROR_GL();
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
bool runTest(int argc, char** argv, char* ref_file)
{
// Create the CUTIL timer
sdkCreateTimer(&timer);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
int devID = findCudaDevice(argc, (const char**)argv);
// command line mode only
if (ref_file != NULL)
{
// create VBO
checkCudaErrors(cudaMalloc((void**)&d_vbo_buffer, mesh_width * mesh_height * 4 * sizeof(float)));
// run the cuda part
runAutoTest(devID, argv, ref_file);
// check result of Cuda step
checkResultCuda(argc, argv, vbo);
cudaFree(d_vbo_buffer);
d_vbo_buffer = NULL;
}
else
{
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
if (false == initGL(&argc, argv))
{
return false;
}
// register callbacks
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutCloseFunc(cleanup);
// create VBO
createVBOseqential(&loc_vbo);
//allocate state grid on host
state = (int*)malloc(mesh_width * mesh_width * mesh_height * sizeof(int));
state_next = (int*)malloc(mesh_width * mesh_width * mesh_height * sizeof(int));
//initialize first state
initializeStartState(NULL, NULL, NULL, NULL);
//sequential run comment out runCuda when you use this.
runSequential(&loc_vbo, state, state_next);
memcpy(state, state_next, sizeof(int) * mesh_width * mesh_width * mesh_width);
//printf("\n after an iteration state address is %d", state);
int mid = mesh_width / 2;
int tx, ty, tz;
tx = mid;
ty = mid;
tz = mid;
int loc = (mesh_width * (ty * mesh_width + tx)) + tz;
//printf("\nafter process value at the offset %d = %d",loc, state[loc]);
tx = mid+1;
ty = mid+1;
tz = mid+1;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
//printf("\nafter process value at the offset %d = %d", loc, state[loc]);
tx = mid-1;
ty = mid-1;
tz = mid-1;
loc = (mesh_width * (ty * mesh_width + tx)) + tz;
//printf("\nafter process value at the offset %d = %d", loc, state[loc]);
//end of sequential call
// start rendering mainloop
glutMainLoop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! CPU reference implementation of one cellular-automaton step.
//! Reads the current generation from `state`, writes the next generation into
//! `state_next`, and uploads one float4 vertex per cell into loc_vbo via
//! glBufferSubData.
//! Rules as implemented: a dead cell with exactly 2 live neighbours is born;
//! a live cell with >= 6 live neighbours dies; everything else persists.
//! NOTE(review): y is normalised by mesh_height although the loop runs to
//! mesh_width — assumes mesh_width == mesh_height; confirm.
//! NOTE(review): surviving/unchanged cells get their vertex parked at
//! (1,1,1,1) instead of (u,v,w,1); this matches the original code but looks
//! suspicious — confirm intent.
////////////////////////////////////////////////////////////////////////////////
void runSequential(GLuint* vbo_resource, int* state, int* state_next) {
	// Neighbour offsets for the full 3x3x3 cube; the centre (0,0,0) entry is
	// skipped inside the loop.  Hoisted out of the triple loop — the original
	// rebuilt these arrays once per cell.
	static const int dx[27] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
	static const int dy[27] = { -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1 };
	static const int dz[27] = { -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1 };
	float* temp4Floats = (float*)malloc(sizeof(float) * 4);
	// Bind once; every glBufferSubData below targets the same VBO.  The
	// per-cell glVertexPointer/glEnableClientState calls of the original were
	// irrelevant to the upload and have been removed.
	glBindBuffer(GL_ARRAY_BUFFER, loc_vbo);
	for (int x = 0; x < mesh_width; x++) {
		for (int y = 0; y < mesh_width; y++) {
			for (int z = 0; z < mesh_width; z++) {
				int my_offset = (mesh_width * (y * mesh_width + x)) + z;
				// normalised [-1,1] cell-centre coordinates
				float u = x / (float)mesh_width * 2.0f - 1.0f;
				float v = y / (float)mesh_height * 2.0f - 1.0f;
				float w = z / (float)mesh_width * 2.0f - 1.0f;
				// count live neighbours (26-neighbourhood, clipped at the walls)
				int count_live = 0;
				for (int i = 0; i < 27; i++) {
					int nx = x + dx[i], ny = y + dy[i], nz = z + dz[i];
					if (MAX(MAX(nx, ny), nz) >= mesh_width || MIN(MIN(nx, ny), nz) < 0) {
						continue; // outside the grid
					}
					if (nx == x && ny == y && nz == z) {
						continue; // the cell itself
					}
					int offset = (mesh_width * (ny * mesh_width + nx)) + nz;
					if (state[offset] == 1) {
						count_live++;
					}
				}
				// apply the evolution rule and pick the vertex to upload
				if (state[my_offset] == 0 && count_live == 2) {
					// birth: place the vertex at the cell position
					state_next[my_offset] = 1;
					temp4Floats[0] = u; temp4Floats[1] = v; temp4Floats[2] = w;
				}
				else if (state[my_offset] == 1 && count_live >= 6) {
					// death by overcrowding
					state_next[my_offset] = 0;
					temp4Floats[0] = 1; temp4Floats[1] = 1; temp4Floats[2] = 1;
				}
				else {
					// cell keeps its state
					state_next[my_offset] = state[my_offset];
					temp4Floats[0] = 1; temp4Floats[1] = 1; temp4Floats[2] = 1;
				}
				temp4Floats[3] = 1.0f;
				// BUG FIX: the third argument of glBufferSubData is the byte
				// size of the data, not a usage hint.  The original passed
				// GL_DYNAMIC_DRAW (0x88E8) here, asking GL to copy ~35 KB from
				// a 16-byte heap buffer — the likely cause of the corrupted
				// rendering mentioned in the old TODO.
				glBufferSubData(GL_ARRAY_BUFFER, 16 * my_offset, 4 * sizeof(float), temp4Floats);
			}
		}
	}
	free(temp4Floats);
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation: map the interop VBO, run one
//! simulation step on the device and copy the next generation back into
//! `state`.
//! NOTE(review): d_state/d_state_next are passed by value, so the buffers
//! allocated here are purely local and the caller's pointers are untouched;
//! allocating and freeing device memory every frame is wasteful — consider
//! hoisting the allocation to init code.  `state_next` is unused here.
////////////////////////////////////////////////////////////////////////////////
void runCuda(struct cudaGraphicsResource** vbo_resource, int* state, int* state_next, int* d_state, int* d_state_next)
{
	// map OpenGL buffer object for writing from CUDA
	float4* dptr;
	checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0));
	size_t num_bytes;
	checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes,
		*vbo_resource));
	// one int per cell of the width x width x height grid
	const size_t state_bytes = (size_t)mesh_width * mesh_width * mesh_height * sizeof(int);
	// allocate and fill device memory for the current/next generation
	checkCudaErrors(cudaMalloc((void**)&d_state, state_bytes));
	checkCudaErrors(cudaMalloc((void**)&d_state_next, state_bytes));
	checkCudaErrors(cudaMemcpy(d_state, state, state_bytes, cudaMemcpyHostToDevice));
	/* CUDA KERNEL CALL */
	int thread1D = 8; //MAX 64 for this GTX 1660Ti
	dim3 block(thread1D, thread1D, thread1D);
	dim3 grid(mesh_width / block.x, mesh_width / block.y, mesh_height / block.z);
	simple_vbo_kernel <<< grid, block >>> (dptr, mesh_width, mesh_width, mesh_height, g_fAnim, d_state, d_state_next);
	// a kernel launch returns no status of its own — query it explicitly so a
	// bad launch configuration does not fail silently
	checkCudaErrors(cudaGetLastError());
	// copy the freshly computed generation back; this blocking cudaMemcpy
	// also synchronises with the kernel
	checkCudaErrors(cudaMemcpy(state, d_state_next, state_bytes, cudaMemcpyDeviceToHost));
	// flush per-frame device memory
	checkCudaErrors(cudaFree(d_state));
	checkCudaErrors(cudaFree(d_state_next));
	// unmap buffer object so OpenGL can render from it again
	checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0));
}
#ifdef _WIN32
#ifndef FOPEN
// Windows: use the secure CRT variant.
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
// POSIX: plain fopen; the handle is NULL on failure.
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif
// Dump `bytes` bytes starting at `data` into `filename` as a raw binary file.
// Used to produce reference outputs for regression comparison.
void sdkDumpBin2(void* data, unsigned int bytes, const char* filename)
{
	printf("sdkDumpBin: <%s>\n", filename);
	FILE* fp;
	FOPEN(fp, filename, "wb");
	// BUG FIX: the original passed a NULL stream straight to fwrite when the
	// file could not be opened, crashing instead of reporting the failure.
	if (!fp)
	{
		fprintf(stderr, "sdkDumpBin2: failed to open <%s> for writing\n", filename);
		return;
	}
	fwrite(data, bytes, 1, fp);
	fflush(fp);
	fclose(fp);
}
////////////////////////////////////////////////////////////////////////////////
//! Auto-test path: dump the device VBO buffer to simpleGL.bin and compare it
//! against the reference file, bumping g_TotalErrors on mismatch.
//! NOTE(review): the kernel launch is commented out, so this compares
//! whatever d_vbo_buffer currently holds — confirm that is intended.
////////////////////////////////////////////////////////////////////////////////
void runAutoTest(int devID, char** argv, char* ref_file)
{
	char* reference_file = NULL;
	void* imageData = malloc(mesh_width * mesh_height * sizeof(float));
	// execute the kernel
	//launch_kernel((float4*)d_vbo_buffer, mesh_width, mesh_height, g_fAnim);
	cudaDeviceSynchronize();
	getLastCudaError("launch_kernel failed");
	checkCudaErrors(cudaMemcpy(imageData, d_vbo_buffer, mesh_width * mesh_height * sizeof(float), cudaMemcpyDeviceToHost));
	sdkDumpBin2(imageData, mesh_width * mesh_height * sizeof(float), "simpleGL.bin");
	reference_file = sdkFindFilePath(ref_file, argv[0]);
	if (reference_file &&
		!sdkCompareBin2BinFloat("simpleGL.bin", reference_file,
			mesh_width * mesh_height * sizeof(float),
			MAX_EPSILON_ERROR, THRESHOLD, pArgv[0]))
	{
		g_TotalErrors++;
	}
	// BUG FIX: imageData was leaked on every call.
	free(imageData);
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
//! Allocates an uninitialised GL buffer sized for one float4 per grid cell
//! and registers it with CUDA for interop access.
////////////////////////////////////////////////////////////////////////////////
void createVBO(GLuint* vbo, struct cudaGraphicsResource** vbo_res,
	unsigned int vbo_res_flags)
{
	assert(vbo);
	// create and bind a fresh buffer object
	glGenBuffers(1, vbo);
	glBindBuffer(GL_ARRAY_BUFFER, *vbo);
	// reserve room for width x width x height cells, 4 floats each; no data yet
	const unsigned int bytes = mesh_width * mesh_width * mesh_height * 4 * sizeof(float);
	glBufferData(GL_ARRAY_BUFFER, bytes, 0, GL_DYNAMIC_DRAW);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	// hand the buffer over to CUDA
	checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
	SDK_CHECK_ERROR_GL();
}
/*
	Create a VBO for the sequential (CPU) path.  Same sizing as createVBO,
	but the buffer is NOT registered with CUDA — the host fills it directly
	with glBufferSubData.
*/
void createVBOseqential(GLuint* vbo) {
	assert(vbo);
	// create and bind a fresh buffer object
	glGenBuffers(1, vbo);
	glBindBuffer(GL_ARRAY_BUFFER, *vbo);
	// one float4 per cell of the width x width x height grid
	const unsigned int bytes = mesh_width * mesh_width * mesh_height * 4 * sizeof(float);
	glBufferData(GL_ARRAY_BUFFER, bytes, 0, GL_DYNAMIC_DRAW);// do it here.
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	SDK_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
//! Unregisters the buffer from CUDA, deletes the GL buffer object and zeroes
//! the caller's handle.
//! NOTE(review): cleanup() also calls this for loc_vbo, which is created by
//! createVBOseqential without a CUDA registration — the unregister call there
//! may fail; confirm.
////////////////////////////////////////////////////////////////////////////////
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res)
{
	// unregister this buffer object with CUDA
	checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res));
	// BUG FIX: glBindBuffer's first argument is a binding target, not a
	// buffer id; the original passed the literal 1, which is not a valid
	// target enum.
	glBindBuffer(GL_ARRAY_BUFFER, *vbo);
	glDeleteBuffers(1, vbo);
	*vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
//! Runs one sequential simulation step (which also uploads vertices into
//! loc_vbo), then renders the point cloud with the current camera transform.
////////////////////////////////////////////////////////////////////////////////
void display()
{
sdkStartTimer(&timer);
//sequential state update: compute next generation and upload vertices
runSequential(&loc_vbo, state, state_next);
// promote the freshly computed generation to the current one
// NOTE(review): size uses mesh_width^3 while buffers elsewhere are sized
// width*width*height — assumes mesh_width == mesh_height; confirm.
memcpy(state, state_next, sizeof(int) * mesh_width * mesh_width * mesh_width);
//end of sequential block
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix: dolly along z, then orbit around x and y
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo: one 4-float vertex per grid cell, drawn as points
glBindBuffer(GL_ARRAY_BUFFER, loc_vbo);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(0.0, 1.0, 0.4);
glPointSize(1.0f);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_width * mesh_height); //added third multiplier
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
g_fAnim += 0.01f;
sdkStopTimer(&timer);
computeFPS();
}
// GLUT timer callback: request a redraw and re-arm the timer while a window
// still exists; stops re-arming itself once the window is gone.
void timerEvent(int value)
{
	if (!glutGetWindow())
		return;
	glutPostRedisplay();
	glutTimerFunc(REFRESH_DELAY, timerEvent, 0);
}
// Tear down resources on exit: the SDK timer, whichever vertex buffers were
// actually created, and the host-side automaton state buffers.
void cleanup()
{
	sdkDeleteTimer(&timer);
	if (vbo)
		deleteVBO(&vbo, cuda_vbo_resource);
	// NOTE(review): loc_vbo is created via createVBOseqential, which does not
	// register a CUDA resource — confirm deleteVBO handles that path.
	if (loc_vbo)
		deleteVBO(&loc_vbo, cuda_loc_vbo_resource);
	free(state);
	free(state_next);
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
void keyboard(unsigned char key, int /*x*/, int /*y*/)
{
	// ESC (27): destroy the current window.
	if (key == 27)
	{
		glutDestroyWindow(glutGetWindow());
	}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y)
{
	// Track held buttons: clear the whole mask on release, set the button's
	// bit on press.
	if (state == GLUT_UP)
	{
		mouse_buttons = 0;
	}
	else if (state == GLUT_DOWN)
	{
		mouse_buttons |= 1 << button;
	}
	// Remember the position so motion() can compute deltas.
	mouse_old_x = x;
	mouse_old_y = y;
}
// Mouse-drag handler: button 0 (bit 1) orbits the view, button 2 (bit 4)
// dollies along z.  Deltas are taken against the last recorded position.
void motion(int x, int y)
{
	const float dx = (float)(x - mouse_old_x);
	const float dy = (float)(y - mouse_old_y);
	if (mouse_buttons & 1)
	{
		// left drag: rotate
		rotate_x += dy * 0.2f;
		rotate_y += dx * 0.2f;
	}
	else if (mouse_buttons & 4)
	{
		// right drag: zoom
		translate_z += dy * 0.01f;
	}
	mouse_old_x = x;
	mouse_old_y = y;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if the result is correct or write data to file for external
//! regression testing
//! Temporarily unregisters the VBO from CUDA so it can be mapped read-only
//! with glMapBuffer, optionally dumps it when the "regression" flag is given,
//! then re-registers it for CUDA interop.
////////////////////////////////////////////////////////////////////////////////
void checkResultCuda(int argc, char** argv, const GLuint& vbo)
{
// NOTE(review): guarded on !d_vbo_buffer — presumably only valid when
// rendering through the interop VBO rather than a staging buffer; confirm.
if (!d_vbo_buffer)
{
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource));
// map buffer object for read-only host access
glBindBuffer(GL_ARRAY_BUFFER, vbo);
float* data = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
// check result
if (checkCmdLineFlag(argc, (const char**)argv, "regression"))
{
// write file for regression test
sdkWriteFile<float>("./data/regression.dat",
data, mesh_width * mesh_height * 3, 0.0, false);
}
// unmap GL buffer object
if (!glUnmapBuffer(GL_ARRAY_BUFFER))
{
fprintf(stderr, "Unmap buffer failed.\n");
fflush(stderr);
}
// hand the buffer back to CUDA for subsequent frames
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsWriteDiscard));
SDK_CHECK_ERROR_GL();
}
}
|
a5bb9397fa3157461c8f1b29b224481fabc9fb62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d5pt-256-10-128_kernel.hu"
// Read one element of the shared-memory stencil buffer; used by the
// generated kernel below through its __SBREF macro.
__device__ double __sbref_wrap(double *sb, size_t index)
{
	return *(sb + index);
}
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
// AN5D-generated, temporally-blocked 2D stencil kernel (auto-generated code; edit with care).
// Fuses __side0Len = 8 time steps into one launch using a register pipeline
// (__reg_1_* .. __reg_7_* hold intermediate time levels; __reg_0_* hold freshly
// loaded rows) plus a double-buffered shared-memory row (__b_sb_double) for the
// c2-direction neighbours.
//
// Launch layout (derived from the index math below): 1-D grid; blockIdx.x encodes
// a (c1-tile, c2-tile) pair via __side2Num; each thread owns one c2 column
// (__tid == __local_c2). The block is expected to provide __side2LenOl threads
// (= __side2Len + 2 * __halo2 * __side0Len) — TODO confirm against the host launcher.
//
// A holds two time planes selected by parity: __LOAD reads plane (c0 % 2),
// __STORE writes plane ((c0 + 1) % 2) through __DEST.
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents: c0 = time, c1 = rows, c2 = columns; one halo cell on each side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// Tile sizes: 8 fused time steps over a 128 x 240 (c1 x c2) tile, each side
// widened by halo * side0Len overlap cells so the 8th step is still valid.
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// Flat thread id doubles as the local c2 coordinate within the widened tile.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Rotating 3-register windows, one triple per pipeline stage (time level).
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
// Two shared-memory rows, ping-ponged by __DB_SWITCH (invoked inside every
// __CALCn via __CALCSETUP) so c2-neighbour reads never race the next write.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
// Validity masks: each pipeline stage k shrinks the writable c2 range by one
// halo on each side; only stage 8 (__storeValid) writes results back to A.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
// First c1 row owned by this tile (tile origin in global row coordinates).
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Read row h of the current time plane (c0 % 2) into a register.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// Destination cell in the other time plane ((c0 + 1) % 2).
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// 5-point weighted stencil: rows above/below from registers (__a/__c),
// left/right c2 neighbours from the shared row, centre from __b.
// NOTE(review): the coefficients are float literals (5.1f, 12.1f, ...) applied
// to double data — faithful to the generator's output, but confirm the
// intended precision before changing.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// Flip between the two halves of __b_sb_double.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// Publish the centre row to shared memory (barrier before neighbour reads).
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// Stage k: advance one time level where valid; otherwise pass the centre through.
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// Final (8th) time level: write straight into the destination plane of A.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// ---- Prologue: prime all 8 pipeline stages. ----
if (__c1Id == 0)
{
// Topmost tile: row 0 (the boundary row, kept in __reg_7_0) is re-fed into
// every stage so the physical boundary stays fixed across fused time steps.
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
else
{
// Interior/lower tiles: plain ramp-up over the 17-row overlap region; each
// stage starts once enough rows of the previous stage are available.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
// Reset the shared-memory double buffer to its first half before steady state.
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// ---- Last c1-tile: steady state, then drain the pipeline at the bottom. ----
// Steady state: one row in, one result row out (store lags load by 8 rows).
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
// Epilogue: 0/1/2 rows left before the bottom boundary; in each case the
// last loaded row is re-fed into the remaining stages to drain the pipeline.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
// ---- Interior c1-tiles: steady-state only; no boundary drain needed. ----
// Unrolled 3x so the register triples rotate back to their starting roles.
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
// Remainder: up to 2 extra rows of the overlapped tile, exiting at the edge.
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
// -----------------------------------------------------------------------------
// kernel0_7 — auto-generated (AN5D-style) time-tiled 2D stencil kernel.
//
// Performs __side0Len = 7 fused time steps of a 5-point stencil over a
// dimsize x dimsize grid stored in A, which holds two time planes selected by
// (c0 % 2) on load and ((c0 + 1) % 2) on store (see __LOAD / __DEST below).
//
// Launch expectations (derived from the index math in this body):
//   - 1D grid: blockIdx.x encodes a (c1-tile, c2-tile) pair via
//     __c1Id = blockIdx.x / __side2Num and blockIdx.x % __side2Num.
//   - Block is treated as flat: __tid = threadIdx.y * blockDim.x + threadIdx.x,
//     and the block must provide __blockSize = __side2LenOl = 242 + 2*7 = 256
//     threads, one per c2 column of the overlapped tile.
//
// Structure: each thread streams rows (c1 direction) through a software
// pipeline of 7 stencil stages (__CALC1..__CALC7 + __STORE). Stage k keeps a
// rotating window of 3 rows in registers __reg_k_0/_1/_2; the cyclic argument
// orderings of consecutive __CALC*/__STORE calls implement that rotation, so
// statement order and register choice are load-bearing throughout.
// Cross-thread (c2-direction) neighbors are exchanged through the
// double-buffered shared array __b_sb_double (swapped by __DB_SWITCH, read via
// __SBREF at __tid +/- 1).
// -----------------------------------------------------------------------------
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents and padding for the three dimensions:
// c0 = time, c1 = rows, c2 = columns; the spatial domain excludes a
// 1-cell boundary on each side (dimsize - 1 - 1, padded by 1).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
// Tile shape: 7 fused time steps over a 128 x 242 spatial tile; each fused
// step grows the needed overlap (halo) by 1 in each direction, giving the
// overlapped extents __side1LenOl / __side2LenOl.
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
// One thread per overlapped c2 column (256 threads expected per block).
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Num + 0 ? 0 : (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
// Decode which c1 tile and which global c2 column this thread works on.
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: __reg_<stage>_<slot>. Stage 0 holds raw loads; stages
// 1..6 hold intermediate time steps; stage 7 (__reg_7_*) appears only in the
// truncated predecessor kernel — here the deepest registers are __reg_6_*
// feeding __STORE (which applies the 7th stencil application directly).
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
// Double-buffered shared row for horizontal (c2) neighbor exchange; __b_sb
// points at the active half and is flipped by __DB_SWITCH.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
// Validity masks: threads in the overlap region load data but progressively
// stop writing as the stencil halo shrinks with each fused time step.
// __writeValidK guards step K; only __writeValid7 threads store final output.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: read row h of the current time plane into a stage-0 register.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
// __DEST: output cell in the other time plane.
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// __CALCEXPR: weighted 5-point combination — vertical neighbors (__a, __c)
// from registers, horizontal neighbors of __b from shared memory.
// NOTE(review): coefficients are float literals applied to doubles and the
// sum is divided by integer 118; this matches the generator's output for the
// sibling kernels, so it is preserved verbatim.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
// Flip between the two halves of the shared buffer.
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
// Publish the middle row to shared memory and barrier before neighbor reads.
// (Every __CALCn/__STORE begins with this, so each call is a block-wide sync
// point — all threads must reach each macro invocation together.)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
// __CALCk: apply stencil step k where valid; otherwise pass the centre value
// through unchanged (halo threads just forward data down the pipeline).
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
// __STORE: 7th (final) stencil application, written straight to __DEST.
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// ---- Prologue: fill the 7-stage pipeline (rows 0..14). ----
// First c1 tile: the domain boundary row 0 is replicated into every stage
// (note __reg_6_0 is loaded once and reused as the "row above" for each
// stage's first application).
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
else
{
// Interior c1 tile: rows 0..14 come from the overlap region of the
// neighboring tile, so stages are simply primed by streaming loads; only
// the last step of the prologue produces a stored row (h = 7).
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
// Extra buffer flip so both branches leave the double buffer in the same
// phase before the steady-state loop below.
__DB_SWITCH(); __syncthreads();
}
// Reset the active shared buffer to a known half for the main loop.
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// ---- Last c1 tile: steady state, then a boundary epilogue. ----
// Loop is unrolled x3 so register rotation returns to its initial phase
// each iteration; each step loads one row and stores row (__h - 7).
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: drain the pipeline at the bottom boundary. One case per
// residual row count (0, 1, or 2 rows left before the tile/domain edge);
// the last loaded row is replicated as the "row below" while each stage
// flushes its in-flight data.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
// ---- Interior c1 tile: steady state over the full overlapped extent. ----
// Same x3-unrolled pipeline; the trailing guarded steps handle the 0/1/2
// leftover rows when __side1LenOl is not a multiple of 3.
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
/*
 * Auto-generated (AN5D-style) temporally-blocked stencil kernel.
 * Fuses __side0Len = 5 consecutive time steps of the 5-point stencil in
 * __CALCEXPR over a (c1, c2) slice of A, streaming along c1 with register
 * rotation (three registers per fused time level, __reg_k_0..2) and a
 * double-buffered shared-memory row (__b_sb_double) that supplies the
 * c2-1 / c2+1 neighbor values.  A holds two time planes selected by
 * (c0 % 2); results for step c0+1 go to the other plane via __DEST.
 * Grid layout: blockIdx.x encodes (c1 tile, c2 tile); threads cover one
 * overlapped row of length __side2LenOl in c2.
 * NOTE(review): the stencil coefficients are float literals (5.1f, ...)
 * applied to double data and divided by the int 118 — presumably the
 * generator's choice; confirm the intended precision.
 */
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents and 1-cell padding of the physical domain. */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
/* 5 fused time steps; each fused step costs one halo layer per side, so the
 * overlapped ("Ol") tile extends the nominal tile by 2 * 5 * halo cells. */
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
/* Decode this block's c1 tile index and this thread's global c2 column. */
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* Three rotating c1-rows per fused time level (levels 0..4 here;
 * level 5 is produced directly by __STORE). */
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
/* Double-buffered shared row: holds the "center" c1-row of the current
 * stencil application so neighbors at c2 +/- 1 can be read via __SBREF. */
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
/* Validity predicates: level k may only be computed by threads at least
 * k halo cells inside the overlapped tile edge; only the innermost
 * threads (__writeValid5) store final results. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD: read row h of the current time plane ((c0 % 2) half of A).
 * __CALCn: one stencil application for fused level n; threads outside
 * __writeValidN pass the center value through unchanged.
 * __STORE: the 5th (final) application, written to the (c0+1) plane.
 * Every __CALCn/__STORE publishes its center row to shared memory
 * (with a buffer flip and __syncthreads) via __CALCSETUP first. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* ---- Prologue: prime the 5-deep pipeline with rows h = 0..10 ---- */
if (__c1Id == 0)
{
/* First tile along c1: row 0 is the physical lower boundary.  __reg_4_0
 * holds that boundary row and is reused as the "above" operand when each
 * deeper level starts; the first final result is stored at h == 1. */
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
/* Interior tile: fill the pipeline from the overlapped halo rows; the
 * first row this tile stores is h == 5. */
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
/* Reset the shared-memory buffer pointer before the steady-state loops
 * (the prologue branches leave the double buffer in different phases). */
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
/* Last tile along c1: stream until near the domain's upper edge, then
 * handle the remaining 0/1/2 rows explicitly below. */
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
/* Steady state: three rows per iteration; register roles rotate so each
 * unrolled step uses a different permutation of the same macros. */
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
/* Pipeline flush at the upper boundary: the last loaded row stands in for
 * the missing neighbor at each remaining level. */
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* One extra row to load before flushing. */
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* Two extra rows to load before flushing. */
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
/* Not the last tile: stream across the full overlapped extent, draining
 * the final (up to three) rows with early-return guards. */
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
/*
 * Auto-generated (AN5D-style) temporally-blocked stencil kernel, 4-step
 * variant: fuses __side0Len = 4 consecutive time steps of the 5-point
 * stencil in __CALCEXPR, streaming along c1 with register rotation (three
 * registers per fused time level, __reg_k_0..2) and a double-buffered
 * shared-memory row for the c2-1 / c2+1 neighbor reads.  A holds two time
 * planes selected by (c0 % 2); step c0+1 results go to the other plane.
 * Same structure as kernel0_5 with one fewer pipeline stage (and a
 * correspondingly wider __side2Len of 248).
 * NOTE(review): float coefficients (5.1f, ...) on double data, divided by
 * the int 118 — presumably the generator's choice; confirm precision.
 */
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents and 1-cell padding of the physical domain. */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
/* 4 fused time steps; overlapped tile grows by 2 * 4 * halo cells. */
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
/* Decode this block's c1 tile index and this thread's global c2 column. */
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* Three rotating c1-rows per fused time level (levels 0..3 here;
 * level 4 is produced directly by __STORE). */
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
/* Double-buffered shared row for c2 +/- 1 neighbor reads via __SBREF. */
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
/* Validity predicates: level k writes only threads at least k halo cells
 * inside the overlapped tile edge; __writeValid4 gates final stores. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD: read row h of the current time plane ((c0 % 2) half of A).
 * __CALCn: one stencil application for fused level n; threads outside
 * __writeValidN pass the center value through unchanged.
 * __STORE: the 4th (final) application, written to the (c0+1) plane.
 * Each __CALCn/__STORE publishes its center row to shared memory (buffer
 * flip + __syncthreads) via __CALCSETUP before computing. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* ---- Prologue: prime the 4-deep pipeline with rows h = 0..8 ---- */
if (__c1Id == 0)
{
/* First tile along c1: row 0 is the physical lower boundary.  __reg_3_0
 * holds that boundary row and is reused as the "above" operand when each
 * deeper level starts; the first final result is stored at h == 1. */
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
/* Interior tile: fill the pipeline from the overlapped halo rows; the
 * first row this tile stores is h == 4. */
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
/* Reset the shared-memory buffer phase before the steady-state loops. */
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
/* Last tile along c1: stream until near the domain's upper edge, then
 * handle the remaining 0/1/2 rows explicitly below. */
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
/* Steady state: three rows per iteration with rotating register roles. */
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
/* Pipeline flush at the upper boundary: the last loaded row stands in for
 * the missing neighbor at each remaining level. */
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* One extra row to load before flushing. */
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* Two extra rows to load before flushing. */
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
/* Not the last tile: stream across the full overlapped extent, draining
 * the final (up to three) rows with early-return guards. */
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
// AN5D-generated, temporally-blocked stencil kernel: fuses __side0Len = 2 time
// steps of the 5-point Jacobi-like update defined by __CALCEXPR over one
// 128 x 252 (__side1Len x __side2Len) interior tile of A per thread block.
// A holds two time slices of a dimsize x dimsize grid; the (__c0 % 2) /
// ((c0 + 1) % 2) terms in __LOAD / __DEST ping-pong between them.
// Launch layout implied by the index math: 1-D grid where blockIdx.x encodes
// both the c1-tile id (blockIdx.x / __side2Num) and the c2-tile id
// (blockIdx.x % __side2Num); the block must provide __blockSize threads
// (one thread per c2 column of the overlapped tile).
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-domain geometry: c0 = time, c1 = rows, c2 = columns; the physical
// domain excludes a 1-cell boundary on each side (__c1Pad/__c2Pad = 1).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// Two fused time steps -> tiles overlap by __halo * 2 cells on each side
// (the "Ol" = overlapped lengths below).
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// Each thread owns one c2 column of the overlapped tile.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline along c1: __reg_0_* hold raw loads, __reg_1_* hold values
// after the first fused time step. Rotation of the *_0/_1/_2 suffixes replaces
// explicit shifting of the 3-row window.
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
// Double-buffered shared line (2 * __blockSize doubles) used to exchange the
// east/west (c2 +/- 1) neighbors between adjacent threads; __DB_SWITCH flips
// between the two halves so a new line never overwrites one still being read.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
// Validity masks: __loadValid limits global loads to the padded domain;
// __writeValidN shrinks the active region by N halo cells (threads outside it
// just forward their input), and the final store uses the innermost mask.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD reads row (h) of the current time slice; __STORE writes the fully
// updated row into the other slice. __CALCSETUP publishes the middle register
// to shared memory and barriers so __SBREF can read neighbors.
// NOTE(review): the stencil coefficients are float literals (5.1f, 12.1f, ...)
// inside a double-precision kernel; this matches the generator's output but
// limits coefficient precision — confirm intended.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Prologue. The first c1 tile (__c1Id == 0) starts at the physical boundary,
// so the top boundary row is reused (via __reg_1_0) instead of computed; all
// other tiles warm the pipeline from their overlapped top halo.
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
// Re-anchor the shared-memory buffer so both prologue paths continue from the
// same double-buffer half.
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: steady-state loop (unrolled x3 to keep the register rotation
// branch-free), then an epilogue that handles the 0/1/2 leftover rows at the
// bottom boundary, where the boundary row feeds the stencil unmodified.
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
// Interior c1 tile: same steady-state loop over the full overlapped height,
// draining the pipeline row by row; early returns stop as soon as the tile's
// overlapped extent (__side1LenOl) is exhausted.
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
// AN5D-generated single-time-step (__side0Len = 1) variant of the 5-point
// stencil kernel: one sweep applies __CALCEXPR once per interior cell of a
// 128 x 254 tile, reading the (c0 % 2) slice of A and writing the other slice.
// With no temporal fusion there is no intermediate __reg_1_* stage — raw loads
// feed __STORE directly through a 3-row register window.
// Launch layout implied by the index math: 1-D grid, blockIdx.x encodes the
// c1-tile id and c2-tile id; the block must provide __blockSize threads.
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-domain geometry: 1-cell boundary excluded on every side.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
// One time step -> tiles overlap by exactly one halo cell per side.
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
// One thread per c2 column of the overlapped tile.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// 3-row register window along c1; the *_0/_1/_2 suffixes rotate instead of
// shifting values between registers.
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
// Double-buffered shared line for exchanging c2 +/- 1 neighbors between
// adjacent threads; __DB_SWITCH flips halves between iterations.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
// __loadValid bounds global loads to the padded domain; __storeValid trims
// one halo column from each side of the block so only interior results land.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __STORE publishes the middle row to shared memory, barriers, then (if this
// thread is in the store region) evaluates the stencil straight into __DEST.
// NOTE(review): float coefficient literals inside a double kernel — matches
// the generator's output but limits coefficient precision; confirm intended.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Prologue: warm the 3-row window and emit the first interior row. With a
// single fused step both branches happen to be identical here; the generator
// keeps the boundary/interior split for structural uniformity with the other
// kernel variants.
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
// Re-anchor the shared buffer so the main loop starts from a known half.
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: steady-state loop (unrolled x3 for the register rotation),
// then an epilogue handling the 0/1/2 leftover rows before the bottom
// boundary.
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
// Interior c1 tile: same loop over the full overlapped height, with early
// returns once the tile's overlapped extent (__side1LenOl) is exhausted.
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
| a5bb9397fa3157461c8f1b29b224481fabc9fb62.cu | #include "j2d5pt-256-10-128_kernel.hu"
// Helper behind the __SBREF macro: fetches one element from the shared-memory
// line buffer. Keeping the access in a function keeps the macro expansion
// readable while compiling down to a single indexed load.
__device__ double __sbref_wrap(double *sb, size_t index)
{
    return *(sb + index);
}
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
/*
 * kernel0_7: AN5D-style generated 2D 5-point stencil kernel (double data),
 * fusing __side0Len = 7 time steps per kernel launch.
 *
 * Layout (grounded in the code below):
 *  - A holds two time planes of a dimsize x dimsize grid; __LOAD reads plane
 *    (c0 % 2), __STORE writes plane ((c0 + 1) % 2) via __DEST.
 *  - The c1 (row) direction is streamed through registers: seven pipeline
 *    stages __CALC1..__CALC7 each advance one fused time step, rotating
 *    three registers __reg_k_0/1/2 per stage (index is __h modulo 3).
 *  - The c2 (column) direction lives in double-buffered shared memory
 *    (__b_sb_double, toggled by __DB_SWITCH); __CALCEXPR reads the c2
 *    neighbours through __SBREF(__b_sb, -1 / +1).
 *  - Stencil: (5.1f*N + 12.1f*W + 15.0f*C + 12.2f*E + 5.2f*S) / 118.
 *    NOTE(review): coefficients are float literals and the divisor is the
 *    int 118, even though data is double — this matches the generator's
 *    output and the sibling kernels in this file, so it is left as-is.
 *  - Each block owns a __side1Len x __side2Len output tile plus a
 *    __side0Len-deep halo on both sides of each axis (__OlLen1/__OlLen2);
 *    the __writeValidK / __storeValid predicates shrink the valid region
 *    by one halo per fused step.
 */
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem-domain extents and padding for the three loop dimensions
 * (c0 = time, c1 = row, c2 = column); the grid interior is
 * [1, dimsize - 1) in both spatial axes (halo of 1). */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
/* 7 fused time steps; 128 x 242 output tile per block.  The overlapped
 * ("Ol") extents add a 7-cell halo on each side of each axis. */
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
/* One thread per c2 column of the (overlapped) tile; blockIdx.x encodes
 * both the c1 tile id and the c2 tile id. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* Register pipeline: stage k (time step k) keeps a 3-row rolling window
 * in __reg_k_0/1/2; the stage-7 result goes straight to __STORE. */
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
/* Double-buffered shared row (c2 direction); __DB_SWITCH flips between
 * the two halves so a stage can read neighbours while the next stage
 * publishes its own row. */
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
/* Validity predicates: loads may touch the halo; each fused step k may
 * only write where a k-deep halo is available on both c2 sides. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD: fetch row h of the current time plane into a register (guarded
 * so halo threads outside the grid read nothing). */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* 5-point stencil of the 3-row window (a = row above, b = centre row in
 * shared memory, c = row below). */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
/* Publish the centre row to the (freshly switched) shared buffer and
 * barrier before any neighbour reads; invalid lanes pass the value through
 * unchanged so the halo keeps propagating correct data. */
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
/* __STORE: the 7th (final) pipeline stage — computes and writes row h of
 * the next time plane. */
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* ---- Pipeline warm-up ----
 * First c1 tile: row 0 is the physical boundary, so the boundary value
 * (__reg_6_0) is fed into each new stage as its "row above". */
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
/* Interior c1 tiles: warm up by streaming the halo rows; no boundary
 * injection, stages simply come online one row later each. */
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
__DB_SWITCH(); __syncthreads();
}
/* Re-anchor the shared pointer to buffer 0 so both warm-up paths enter the
 * steady-state loop in the same double-buffer phase. */
__b_sb = __b_sb_double + __blockSize * 0;
/* ---- Steady state + drain ----
 * Last c1 tile: stream until near the bottom boundary, then drain the
 * pipeline (the remainder cases feed the boundary row into each stage). */
if (__c1Id == __side1Num - 1)
{
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
/* Drain: 0/1/2 rows remain before the bottom boundary; each case loads
 * the leftovers, then flushes every stage using the last row as the
 * boundary value. */
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
/* Interior c1 tiles: stream the full overlapped extent; after the 3-wide
 * unrolled loop, up to three leftover rows are handled with early-return
 * guards (their outputs land in the next tile's halo region). */
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
/*
 * kernel0_5 — AN5D-style generated temporal-blocking stencil kernel.
 *
 * Fuses __side0Len = 5 applications of a 5-point stencil per kernel pass,
 * using a software register pipeline: four __CALCn stages plus the final
 * __CALCEXPR inside __STORE.  Each stage rotates through a triple of
 * registers (__reg_k_0/1/2) holding three consecutive c1-rows.
 *
 * Data layout (visible from the indexing macros below):
 *   - A holds two time planes of size dimsize*dimsize; the source plane is
 *     selected by (c0 % 2) in __LOAD and the destination plane by
 *     ((c0 + 1) % 2) in __DEST — ping-pong buffering in time.
 *   - The c1 (row) direction is streamed through registers; the c2
 *     (intra-row) direction uses neighbor values exchanged through the
 *     shared-memory double buffer __b_sb_double (see __SBREF / __DB_SWITCH).
 *
 * Launch assumptions (from the index computations): a 1-D grid where
 * blockIdx.x enumerates (c1 tile) x (c2 tile) pairs, and the block provides
 * one thread per element of the overlapped c2 tile (__blockSize threads).
 *
 * NOTE(review): the stencil coefficients are float literals (5.1f, 12.1f, ...)
 * applied to double operands, and the divisor is the integer literal 118 —
 * presumably intentional in the generator's output, but worth confirming
 * against the original stencil specification.
 *
 * NOTE(review): the #define blocks below redefine the same macro names used
 * by the previous generated kernel in this file without #undef; the bodies
 * appear token-identical where shared, which C permits, but any generator
 * change that makes them diverge would be an error — verify when regenerating.
 */
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Iteration-space extents and padding: c0 = time, c1 = row, c2 = column.
 * The c1/c2 interiors exclude a 1-element boundary on each side. */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
/* 5 fused time steps; 128 x 246 interior tile per block.  The overlap
 * (__OlLen*) widens each tile by halo*steps on both sides so the interior
 * is valid after all 5 stencil applications. */
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
/* Flat thread id = lane within the overlapped c2 tile. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
/* blockIdx.x encodes (c1-tile id, c2-tile id) in mixed radix. */
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* Register pipeline: __reg_k_{0,1,2} is the rotating 3-row window feeding
 * pipeline stage k (stage 0 = freshly loaded input rows). */
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
/* Double-buffered shared row for c2-neighbor exchange; parity flipped by
 * __DB_SWITCH before every __syncthreads-guarded exchange. */
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
/* Validity predicates: loads allow one halo column beyond the interior;
 * each deeper pipeline stage shrinks the writable lane range by one halo,
 * so only lanes with 5 valid neighbors on each side ever store. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
/* First global c1 row owned by this block's tile. */
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD: read row (__c1Pad2 - halo + h) of the current time plane into reg. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
/* __DEST: element (c1, c2) of the next time plane. */
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
/* c2-neighbor access through the shared row (wrapped helper defined elsewhere). */
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* 5-point stencil: rows a/b/c (c1 direction) and b's c2 neighbors from shared. */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
/* Publish the center row to shared memory, then barrier before neighbors read it.
 * NOTE: the __syncthreads() expands inside every __CALCn/__STORE — all threads
 * of the block must reach each macro invocation (predication is via the
 * __writeValid flags, never via divergent control flow around the macro). */
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
/* Stage k: apply the stencil if this lane is still valid at depth k,
 * otherwise pass the center value through unchanged. */
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
/* Final (5th) stencil application writes straight into the next time plane. */
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
/* First c1 tile: no rows above the domain exist, so the top boundary row
 * (held in __reg_4_0 after __LOAD(__reg_4_0, 0)) is fed unchanged into every
 * pipeline stage as it warms up; stores start at row 1. */
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
/* Interior c1 tile: the tile's overlap region supplies real rows above,
 * so the pipeline is filled purely from loads; first store is row 5. */
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
/* Both prologues end with the same buffer parity; pin it explicitly before
 * entering the steady-state loops. */
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
/* Last c1 tile: stream rows in groups of 3 until within 3 rows of the
 * domain edge, then drain the pipeline in one of three epilogues chosen
 * by how many rows remain (0, 1, or 2 extra loads). */
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* Epilogue A: no rows left to load; the bottom boundary row (__reg_0_1)
 * is fed unchanged into each remaining stage to flush the pipeline. */
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* Epilogue B: one more row to load, then flush with the boundary row. */
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
/* Epilogue C: two more rows to load, then flush with the boundary row. */
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
/* Non-last c1 tile: steady-state over the full overlapped extent; the
 * overlap rows of the next tile are recomputed there, so this tile simply
 * runs the pipeline to __side1LenOl with up to 3 tail iterations. */
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
/* Tail: 0-2 leftover rows (unrolled copies of the loop body).
 * NOTE: the early `return`s are outside any macro that contains
 * __syncthreads(), so all threads of the block exit together. */
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
// 2D 5-point stencil kernel with 3 timesteps fused per launch
// (__side0Len = 3; __storeValid == __writeValid3 gates the final of three
// chained __CALCn update stages). The code appears machine-generated by a
// stencil compiler (AN5D-style naming); it streams rows of a 128 x 250 tile
// (__side1Len x __side2Len, halo 1) through a register pipeline.
//
// A        - double array holding TWO time planes of a dimsize x dimsize grid;
//            plane (__c0 % 2) is read by __LOAD, plane ((c0 + 1) % 2) is
//            written via __DEST.
// dimsize  - edge length of the square grid; the updated interior is
//            [1, dimsize - 2] in both c1 (rows) and c2 (columns).
// timestep - total timestep count (only feeds __c0Len here).
// c0       - current time coordinate; selects the source/destination plane.
//
// Launch contract implied by the indexing: a 1D grid of
// __side1Num * __side2Num blocks and blockDim.x * blockDim.y == __side2LenOl
// threads per block (one thread per column of the overlapped tile).
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and tile geometry; __OlLen* is the halo overlap needed to
// fuse __side0Len timesteps in one pass.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Num;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Register pipeline: three rows of live values per stage (stages 0..2).
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
// Double-buffered shared-memory row; __DB_SWITCH flips between the halves so
// a row being read and a row being written never alias.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
// Validity masks: loads may reach one halo past the interior; each deeper
// write stage shrinks the valid column window by one halo.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD streams row h into a register; __CALCn applies one fused timestep
// (a 5-point weighted average divided by 118); __STORE writes the final
// stage's result for row h into the destination time plane.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// First block along c1: prime the three-stage pipeline and store the top
// boundary rows (1..3) as they become available.
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
// Interior blocks: prime the pipeline over the overlapped rows; only the
// last warm-up row (3) is stored.
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
// Last block along c1: steady-state loop (unrolled by 3), then explicit
// handling for the 0, 1 or 2 rows remaining before the bottom boundary.
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
// Non-final blocks: iterate over the full overlapped extent, draining the
// pipeline with early exits once __side1LenOl is reached.
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
// 2D 5-point stencil kernel with 2 timesteps fused per launch
// (__side0Len = 2; __storeValid == __writeValid2). Same machine-generated
// register-pipeline structure as kernel0_3, but with one fewer __CALC stage
// and a wider 128 x 252 tile (the smaller halo overlap leaves more interior
// columns per block).
//
// A        - double array holding TWO time planes of a dimsize x dimsize grid;
//            plane (__c0 % 2) is read, plane ((c0 + 1) % 2) is written.
// dimsize  - edge length of the square grid (interior is [1, dimsize - 2]).
// timestep - total timestep count (only feeds __c0Len here).
// c0       - current time coordinate; selects the source/destination plane.
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and tile geometry (2-step fusion -> overlap of 2 halos).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Two pipeline stages (0 and 1), three live rows each.
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
// Double-buffered shared-memory row for neighbor exchange within a row.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Same pipeline macros as kernel0_3, with a single intermediate stage.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// First block along c1: prime the pipeline and store top rows 1..2.
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
// Interior blocks: prime over the overlap; only row 2 is stored.
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
// Last block along c1: steady-state loop plus tail for the final 0-2 rows.
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
// Non-final blocks: iterate over the overlapped extent with early exits.
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
// 2D 5-point stencil kernel with a single timestep per launch
// (__side0Len = 1; __storeValid == __writeValid1). Simplest member of the
// machine-generated kernel family: no intermediate __CALC stages, rows are
// loaded into three rotating registers and stored directly. Tile is
// 128 x 254 (__side1Len x __side2Len, halo 1).
//
// A        - double array holding TWO time planes of a dimsize x dimsize grid;
//            plane (__c0 % 2) is read, plane ((c0 + 1) % 2) is written.
// dimsize  - edge length of the square grid (interior is [1, dimsize - 2]).
// timestep - total timestep count (only feeds __c0Len here).
// c0       - current time coordinate; selects the source/destination plane.
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and tile geometry (1-step fusion -> overlap of 1 halo).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
// Three rotating row registers; no intermediate pipeline stages.
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
// Double-buffered shared-memory row for neighbor exchange within a row.
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD streams a row into a register; __STORE applies the 5-point weighted
// stencil (divided by 118) and writes the destination time plane.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// With a single stage the warm-up is identical for boundary and interior
// blocks: load three rows and store row 1.
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
// Last block along c1: steady-state loop plus tail for the final 0-2 rows.
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
// Non-final blocks: iterate over the overlapped extent with early exits.
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
|
cf9d35881efbd0574533f0d554f9746b78b0a7ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
// Transposes a square matrix, one block per output row.
// Thread t of block r writes output[r*blockDim.x + t] = input[t*blockDim.x + r],
// i.e. block r gathers column r of the input into row r of the output.
// Preconditions: 1D grid, nx == ny, one block spans a full row
// (so at most 1024 elements per row).
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void matrix_transpose_k1(float* input, float* output, const int nx, const int ny)
{
    const int row = blockIdx.x;               // output row handled by this block
    const int col = threadIdx.x;              // element within that row
    const int dst = blockDim.x * row + col;   // linear index into output
    const int src = col * blockDim.x + row;   // transposed source index
    output[dst] = input[src];
}
// Row-major copy of an nx x ny matrix with fully coalesced reads and writes
// (adjacent threads in a warp touch adjacent elements of a row).
// Expects a 2D launch covering at least nx x ny threads.
//
// param 1 (input)  - source matrix in a 1D array, row-major, rows of nx elements
// param 2 (output) - destination matrix, same layout
// param 3 (nx)     - number of elements in a row
// param 4 (ny)     - number of rows
__global__ void copy_rows(float* input, float* output, const int nx, const int ny)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int iy = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (ix < nx && iy < ny)
    {
        // Bug fix: the destination index previously used iy*ny + ix, which only
        // matches the source layout when nx == ny; use the row pitch nx for both.
        output[iy*nx + ix] = input[iy*nx + ix];
    }
}
// Copies matrix elements using a strided (column-order) access pattern for
// both the read and the write; serves as the non-coalesced counterpart to
// copy_rows for bandwidth comparison.
// NOTE(review): the index ix*ny + iy under the guard ix < nx is consistent
// only when nx == ny (the file-wide square-matrix assumption) — confirm
// before using with non-square shapes.
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - copy of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void copy_columns(float* input, float* output, const int nx, const int ny)
{
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= nx || iy >= ny)
        return;
    const int idx = ix * ny + iy;  // strided: adjacent threads are ny elements apart
    output[idx] = input[idx];
}
// Matrix transpose: reads the input row-major (coalesced within a warp) and
// writes the output column-major (strided).
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    output[col * ny + row] = input[row * nx + col];
}
// Matrix transpose: reads the input column-major (strided) and writes the
// output row-major (coalesced within a warp) — the mirror image of
// read_coaleased_write_stride_mat_trans.
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void read_stride_write_coaleased_mat_trans(float* input, float* output, const int nx, const int ny)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    output[row * nx + col] = input[col * ny + row];
}
// Host driver: fills an nx x ny float matrix, uploads it, runs the coalesced
// copy_rows kernel, and downloads the result.
//
// Fixes over the original:
//  - the byte count is now computed in size_t with sizeof(float); the old
//    `int mat_byte_size = sizeof(int)*mat_size` overflowed 32 bits for
//    nx = ny = 1<<15 (2^32 bytes),
//  - the launch passed the variables in swapped roles, yielding a 256x256
//    thread block (65536 threads — far above the 1024-per-block limit);
//    a valid 32x32 block is used and the grid is sized from it,
//  - HIP calls are error-checked and all host/device allocations are freed.
void run_matrix_transpose_k1()
{
    int ny = 1 << 15;
    int nx = 1 << 15;
    int blockx = 32;  // 32*32 = 1024 threads per block: the hardware maximum
    int blocky = 32;
    float * h_matrix, *h_output;
    size_t mat_size = (size_t)nx * ny;
    size_t mat_byte_size = sizeof(float) * mat_size;
    h_matrix = (float*)malloc(mat_byte_size);
    h_output = (float*)malloc(mat_byte_size);
    if (h_matrix == NULL || h_output == NULL)
    {
        fprintf(stderr, "host allocation of %zu bytes failed\n", mat_byte_size);
        free(h_matrix);
        free(h_output);
        return;
    }
    for (size_t i = 0; i < mat_size; i++)
    {
        h_matrix[i] = (float)i;
    }
    printf("Printing input matrix \n");
    // (element dump retained from the original, still disabled)
    //for (size_t i = 0; i < mat_size; i+=32)
    //{
    //	if (i != 0 && i%nx == 0) printf("\n");
    //	printf(" %1.0f ", h_matrix[i]);
    //}
    dim3 block(blockx, blocky);
    dim3 grid((nx + blockx - 1) / blockx, (ny + blocky - 1) / blocky);
    float * d_matrix = NULL, *d_output = NULL;
    hipError_t err = hipMalloc((void**)&d_matrix, mat_byte_size);
    if (err == hipSuccess)
        err = hipMalloc((void**)&d_output, mat_byte_size);
    if (err == hipSuccess)
        err = hipMemcpy(d_matrix, h_matrix, mat_byte_size, hipMemcpyHostToDevice);
    if (err == hipSuccess)
    {
        copy_rows << <grid, block >> > (d_matrix, d_output, nx, ny);
        err = hipGetLastError();  // catches invalid launch configurations
    }
    if (err == hipSuccess)
        err = hipDeviceSynchronize();  // surfaces asynchronous kernel failures
    if (err == hipSuccess)
        err = hipMemcpy(h_output, d_output, mat_byte_size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
        fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err));
    else
        printf("Printing output matrix \n");
    hipFree(d_matrix);
    hipFree(d_output);
    free(h_matrix);
    free(h_output);
}
//int main()
//{
// run_matrix_transpose_k1();
// system("pause");
// return 0;
//}
| cf9d35881efbd0574533f0d554f9746b78b0a7ba.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
// Transposes a square matrix, one block per output row.
// Thread t of block r writes output[r*blockDim.x + t] = input[t*blockDim.x + r],
// i.e. block r gathers column r of the input into row r of the output.
// Preconditions: 1D grid, nx == ny, one block spans a full row
// (so at most 1024 elements per row).
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void matrix_transpose_k1(float* input, float* output, const int nx, const int ny)
{
    const int row = blockIdx.x;               // output row handled by this block
    const int col = threadIdx.x;              // element within that row
    const int dst = blockDim.x * row + col;   // linear index into output
    const int src = col * blockDim.x + row;   // transposed source index
    output[dst] = input[src];
}
// Row-major copy of an nx x ny matrix with fully coalesced reads and writes
// (adjacent threads in a warp touch adjacent elements of a row).
// Expects a 2D launch covering at least nx x ny threads.
//
// param 1 (input)  - source matrix in a 1D array, row-major, rows of nx elements
// param 2 (output) - destination matrix, same layout
// param 3 (nx)     - number of elements in a row
// param 4 (ny)     - number of rows
__global__ void copy_rows(float* input, float* output, const int nx, const int ny)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int iy = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (ix < nx && iy < ny)
    {
        // Bug fix: the destination index previously used iy*ny + ix, which only
        // matches the source layout when nx == ny; use the row pitch nx for both.
        output[iy*nx + ix] = input[iy*nx + ix];
    }
}
// Copies matrix elements using a strided (column-order) access pattern for
// both the read and the write; serves as the non-coalesced counterpart to
// copy_rows for bandwidth comparison.
// NOTE(review): the index ix*ny + iy under the guard ix < nx is consistent
// only when nx == ny (the file-wide square-matrix assumption) — confirm
// before using with non-square shapes.
//
// param 1 (input)  - input matrix in 1D array
// param 2 (output) - copy of the input matrix in 1D array
// param 3 (nx)     - number of elements in input matrix's row
// param 4 (ny)     - number of elements in input matrix's column
__global__ void copy_columns(float* input, float* output, const int nx, const int ny)
{
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= nx || iy >= ny)
        return;
    const int idx = ix * ny + iy;  // strided: adjacent threads are ny elements apart
    output[idx] = input[idx];
}
// Read the elements from input matrix in coalesed manner and write to output matrix in stride manner
// param 1 (input) - input matrix in 1D array
// param 2 (output) - tranpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
// Matrix transpose: reads the source in row order (coalesced within a warp)
// and writes the destination in column order (strided).
// param 1 (input) - input matrix in 1D array
// param 2 (output) - tranpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against threads outside the nx x ny domain.
if (ix < nx && iy < ny)
{
// output[col][row] = input[row][col]
output[ix*ny + iy] = input[iy*nx + ix];
}
}
// Matrix transpose variant: reads the source with a strided (column-order)
// access pattern and writes the destination coalesced (row-order).
// Expects a 2D grid/block covering an nx x ny domain.
__global__ void read_stride_write_coaleased_mat_trans(float* input, float* output, const int nx, const int ny)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= nx || row >= ny)
return;
// Coalesced store to the row-major output, strided load from the transposed slot.
output[row * nx + col] = input[col * ny + row];
}
// Host driver for the copy/transpose benchmark kernels: allocates an nx x ny
// float matrix, fills it with a ramp, runs copy_rows on the device and copies
// the result back.
//
// Fixes over the original:
//  - byte size computed with sizeof(float) (buffers hold floats) and size_t
//    (the original `int mat_byte_size` overflowed to 0 for 2^30 elements);
//  - the original launched with a 128x128 "block" (16384 threads, above the
//    1024-threads-per-block hardware limit), so the kernel never ran;
//  - all four buffers were leaked and malloc/launch failures went unreported.
void run_matrix_transpose_k1()
{
const int ny = 1 << 15;
const int nx = 1 << 15;
const size_t mat_size = (size_t)nx * ny;
const size_t mat_byte_size = sizeof(float) * mat_size;
float *h_matrix = (float*)malloc(mat_byte_size);
float *h_output = (float*)malloc(mat_byte_size);
if (h_matrix == NULL || h_output == NULL)
{
printf("Host allocation failed \n");
free(h_matrix);
free(h_output);
return;
}
for (size_t i = 0; i < mat_size; i++)
{
h_matrix[i] = (float)i;
}
printf("Printing input matrix \n");
// 32x32 = 1024 threads per block (the hardware maximum); grid covers the
// whole domain with ceil-division.
dim3 block(32, 32);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
float *d_matrix = NULL, *d_output = NULL;
cudaMalloc((void**)&d_matrix, mat_byte_size);
cudaMalloc((void**)&d_output, mat_byte_size);
cudaMemcpy(d_matrix, h_matrix, mat_byte_size, cudaMemcpyHostToDevice);
copy_rows << <grid, block >> > (d_matrix, d_output, nx, ny);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Kernel launch failed : %s \n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
cudaMemcpy(h_output, d_output, mat_byte_size, cudaMemcpyDeviceToHost);
printf("Printing output matrix \n");
// Release device and host buffers (all leaked in the original).
cudaFree(d_matrix);
cudaFree(d_output);
free(h_matrix);
free(h_output);
}
//int main()
//{
// run_matrix_transpose_k1();
// system("pause");
// return 0;
//}
|
90295049e0316fa06829d979b0414971453c8f53.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
// Explicit template instantiations of the HPMC GPU driver routines for the
// ShapeUnion<ShapeSphere, 256> shape class, so that these kernels are
// compiled into this translation unit.
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 256>
// Free-volume estimation driver.
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 256> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Monte Carlo trial-move update driver.
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 256> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Implicit-depletant overlap counting.
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 256> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Implicit-depletant accept/reject step.
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 256> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 90295049e0316fa06829d979b0414971453c8f53.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
// Explicit template instantiations of the HPMC GPU driver routines for the
// ShapeUnion<ShapeSphere, 256> shape class, so that these kernels are
// compiled into this translation unit.
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 256>
// Free-volume estimation driver.
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 256> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Monte Carlo trial-move update driver.
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 256> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Implicit-depletant overlap counting.
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 256> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
// Implicit-depletant accept/reject step.
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 256> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 256> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
78efe04e67d68ad843f2274aaf191b8200cc42c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
extern "C" {
// Extended Euclidean algorithm on the GPU.
// For grid cell (i, j) the thread computes Bezout coefficients (x, y) with
// x*(i+1) + y*(j+1) == gcd(i+1, j+1), storing x into iTab[i][j] and y into
// jTab[i][j] (row-major, row pitch m).
// arg[0] = n (number of rows), arg[1] = m (number of columns / row pitch).
__global__
void kernelMain(int *iTab, int *jTab, const unsigned int *arg){
//int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int n=arg[0],m=arg[1];
const unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int j=blockIdx.y*blockDim.y+threadIdx.y;
const int rowSize=m;
// Guard against threads outside the n x m domain.
if(i>=n || j>=m)
return;
// Iterative extended Euclid on (a, b) = (i+1, j+1); (lastX, lastY) hold the
// coefficients of the remainder that ends up being the gcd.
int a=i+1,b=j+1;
int x=0,lastX=1;
int y=1,lastY=0;
while(b!=0)
{
int q=a/b;
int c=a%b;
a=b;
b=c;
c=x;
x=lastX-q*x;
lastX=c;
c=y;
y=lastY-q*y;
lastY=c;
}
iTab[rowSize*i+j]=lastX;
jTab[rowSize*i+j]=lastY;
}
}
| 78efe04e67d68ad843f2274aaf191b8200cc42c4.cu | #include<cstdio>
extern "C" {
// Extended Euclidean algorithm on the GPU.
// For grid cell (i, j) the thread computes Bezout coefficients (x, y) with
// x*(i+1) + y*(j+1) == gcd(i+1, j+1), storing x into iTab[i][j] and y into
// jTab[i][j] (row-major, row pitch m).
// arg[0] = n (number of rows), arg[1] = m (number of columns / row pitch).
__global__
void kernelMain(int *iTab, int *jTab, const unsigned int *arg){
//int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int n=arg[0],m=arg[1];
const unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int j=blockIdx.y*blockDim.y+threadIdx.y;
const int rowSize=m;
// Guard against threads outside the n x m domain.
if(i>=n || j>=m)
return;
// Iterative extended Euclid on (a, b) = (i+1, j+1); (lastX, lastY) hold the
// coefficients of the remainder that ends up being the gcd.
int a=i+1,b=j+1;
int x=0,lastX=1;
int y=1,lastY=0;
while(b!=0)
{
int q=a/b;
int c=a%b;
a=b;
b=c;
c=x;
x=lastX-q*x;
lastX=c;
c=y;
y=lastY-q*y;
lastY=c;
}
iTab[rowSize*i+j]=lastX;
jTab[rowSize*i+j]=lastY;
}
}
|
c6ff18cf542162e6a75dcab3b5a280449fbdc88b.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
// Elementwise unary-op kernels dispatched through TensorIterator.
// Each *_kernel_cuda selects the runtime scalar type with an AT_DISPATCH_*
// macro and hands a per-element lambda to gpu_kernel, which performs the
// launch over the iterator.
namespace at { namespace native {
// Bitwise NOT; bool tensors use logical negation since ~ is not meaningful
// for bool.
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
// Logical NOT; input (iter.dtype(1)) and output (iter.dtype(0)) dtypes may
// differ, hence the nested dispatch over both.
void logical_not_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
using self_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
});
});
}
void ceil_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "ceil_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::ceil(a);
});
});
}
void expm1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
void floor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "floor_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::floor(a);
});
});
}
void log_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log(a);
});
});
}
void log10_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
// We manually overload nearbyint because std::nearbyint does not work with ROCm.
// Half/float go through the float intrinsic; double keeps full precision.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
return ::nearbyint(a);
}
void round_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "round_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// We do not use std::round because we would like to round midway numbers to the nearest even integer.
return nearbyint_wrapper(a);
});
});
}
// We manually overload trunc because std::trunc does not work with ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
return ::trunc(a);
}
void trunc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trunc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return trunc_wrapper(a);
});
});
}
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return ::rsqrt(a);
});
});
}
// Sign function (-1/0/+1); bool tensors pass through unchanged.
void sign_kernel_cuda(TensorIterator& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t zero = scalar_t(0);
// Branchless sign: (a > 0) - (a < 0).
return (zero < a) - (a < zero);
});
});
}
}
void erfinv_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void digamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
}
void trigamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
}
// polygamma delegates to digamma (n == 0) / trigamma (n == 1); higher orders
// are not implemented.
void polygamma_kernel_cuda(TensorIterator& iter, int64_t n) {
switch (n) {
case 0: digamma_kernel_cuda(iter); break;
case 1: trigamma_kernel_cuda(iter); break;
default: TORCH_CHECK(false, "polygamma(n,x) is not implemented for n>=2, but was ", n);
}
}
void lgamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
}
// Register the CUDA implementations with the per-op dispatch stubs.
// (trigamma_kernel_cuda is reachable only through polygamma_stub.)
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}}
| c6ff18cf542162e6a75dcab3b5a280449fbdc88b.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
// Elementwise unary-op kernels dispatched through TensorIterator.
// Each *_kernel_cuda selects the runtime scalar type with an AT_DISPATCH_*
// macro and hands a per-element lambda to gpu_kernel, which performs the
// launch over the iterator.
namespace at { namespace native {
// Bitwise NOT; bool tensors use logical negation since ~ is not meaningful
// for bool.
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
// Logical NOT; input (iter.dtype(1)) and output (iter.dtype(0)) dtypes may
// differ, hence the nested dispatch over both.
void logical_not_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
using self_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
});
});
}
void ceil_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "ceil_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::ceil(a);
});
});
}
void expm1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
void floor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "floor_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::floor(a);
});
});
}
void log_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::log(a);
});
});
}
void log10_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
// We manually overload nearbyint because std::nearbyint does not work with ROCm.
// Half/float go through the float intrinsic; double keeps full precision.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
return ::nearbyint(a);
}
void round_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "round_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// We do not use std::round because we would like to round midway numbers to the nearest even integer.
return nearbyint_wrapper(a);
});
});
}
// We manually overload trunc because std::trunc does not work with ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
return ::trunc(a);
}
void trunc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trunc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return trunc_wrapper(a);
});
});
}
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return ::rsqrt(a);
});
});
}
// Sign function (-1/0/+1); bool tensors pass through unchanged.
void sign_kernel_cuda(TensorIterator& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t zero = scalar_t(0);
// Branchless sign: (a > 0) - (a < 0).
return (zero < a) - (a < zero);
});
});
}
}
void erfinv_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void digamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
}
void trigamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
}
// polygamma delegates to digamma (n == 0) / trigamma (n == 1); higher orders
// are not implemented.
void polygamma_kernel_cuda(TensorIterator& iter, int64_t n) {
switch (n) {
case 0: digamma_kernel_cuda(iter); break;
case 1: trigamma_kernel_cuda(iter); break;
default: TORCH_CHECK(false, "polygamma(n,x) is not implemented for n>=2, but was ", n);
}
}
void lgamma_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
}
// Register the CUDA implementations with the per-op dispatch stubs.
// (trigamma_kernel_cuda is reachable only through polygamma_stub.)
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}}
|
6150ddb15ecaafe2d54a1a35af0b41ea521347cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
// Loads the image at the given path as a single-channel grayscale cv::Mat.
// The returned Mat is empty (data == nullptr) when the file cannot be read.
Mat load_image(const char *source_image) {
return imread(source_image, CV_LOAD_IMAGE_GRAYSCALE);
}
// Writes `image` to `path` as a maximum-quality JPEG.
// Returns 0 on success, -1 (after printing a message) when OpenCV fails.
int save_image(Mat &image, const char *path) {
vector<int> jpeg_params;
jpeg_params.push_back(CV_IMWRITE_JPEG_QUALITY);
jpeg_params.push_back(100);
if (imwrite(path, image, jpeg_params)) {
return 0;
}
cout << "Could not save image" << endl;
return -1;
}
// Validates the command line: the program needs a block size and an image name.
// Prints usage and terminates the process when they are missing.
// (The original `return -1;` from a void function is ill-formed C++; the
// intent -- abort the run -- is expressed with exit instead.)
void check_arguments(int argc) {
if (argc < 3) {
std::cout << "Run program with following format: ";
std::cout << "./binarization_gpu <block_size> <image_name>" << std::endl;
std::exit(-1);
}
}
// Sauvola local-threshold binarization.
// Each thread handles one byte of the pitched grayscale image: it computes the
// mean m and standard deviation s over an r-neighbourhood window, derives the
// Sauvola threshold t = m * (1 + k*(s/S - 1)) and writes 0 or 255.
// Pixels whose window would leave the image fall back to a fixed threshold 128.
// The linear thread index is interpreted against the pitched layout:
// row = index / step, column = index % step, where `step` is the cv::Mat row
// pitch (>= width); the padding columns are processed too, which is harmless.
__global__ void sauvolaBinarization(unsigned char *source_image, unsigned char *output_image, int width, int height, int step) {
int x, y;
int r = 13;                        // window radius
float k = 0.12;                    // Sauvola sensitivity parameter
float m, sum, variance, varianceSum, treshold;
int MAX_STANDARD_DEVIATION = 128;  // dynamic range S of the standard deviation
int n = (2 * r + 1) * (2 * r + 1); // pixels per window
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Defensive bound: never read/write past the height*step buffer even if the
// launch configuration over-covers the image (the original had no guard).
if (index >= height * step)
return;
int indexColumn = index % step;
int delimiter = step * height - r * step;
int indexRow = index / step;
if (indexColumn > r && indexColumn < step - r && index > r * step && index < delimiter) {
// Pass 1: window mean.
sum = 0;
varianceSum = 0;
for (y = indexRow - r; y <= indexRow + r; y++) {
for (x = indexColumn - r; x <= indexColumn + r; x++) {
sum += source_image[y * step + x];
}
}
m = sum / n;
// Pass 2: window variance.
for (y = indexRow - r; y <= indexRow + r; y++) {
for (x = indexColumn - r; x <= indexColumn + r; x++) {
varianceSum += (source_image[y * step + x] - m) * (source_image[y * step + x] - m);
}
}
variance = sqrt(varianceSum / n);
treshold = (m * (1.0 + k * (variance / MAX_STANDARD_DEVIATION - 1.0)));
output_image[index] = (source_image[index] > treshold) ? 255 : 0;
} else {
// Border pixels: plain global threshold.
output_image[index] = (source_image[index] > 128) ? 255 : 0;
}
}
// Entry point: loads a grayscale image, runs Sauvola binarization on the GPU,
// reports the elapsed GPU time and stores the result.
int main(int argc, char **argv) {
check_arguments(argc);
int w, h, step;
float time_difference;
string infname = "./original_images/" + string(argv[2]) + ".pgm";
string output_path = "./binarized_images/" + string(argv[2]) + ".pgm";
Mat source_image = load_image(infname.c_str());
Mat output_image = source_image.clone();
if (!source_image.data) {
cout << "Could not open image" << endl;
return -1;
}
w = source_image.cols;
h = source_image.rows;
// Row pitch in bytes (>= w); device buffers are sized h * step.
step = source_image.step;
int block_size = atoi(argv[1]);
int image_size = w * h;
// NOTE(review): image_size / block_size is integer division, so ceil() is a
// no-op and the image tail (plus the h*(step-w) padding bytes) is never
// processed by the kernel; those bytes keep the values copied from the
// input. Also no CUDA call here is checked for errors and the two events
// below are never destroyed -- worth fixing.
int grid_size = ceil(image_size / block_size);
unsigned char *gpu_source = nullptr;
unsigned char *gpu_output = nullptr;
hipEvent_t timer_start, timer_stop;
hipEventCreate(&timer_start);
hipEventCreate(&timer_stop);
hipEventRecord(timer_start, 0);
hipMalloc<unsigned char>(&gpu_source, h * step);
hipMalloc<unsigned char>(&gpu_output, h * step);
hipMemcpy(gpu_source, source_image.data, h * step, hipMemcpyHostToDevice);
// Copy the input into the output buffer too, so unprocessed bytes retain
// the source values after the round trip.
hipMemcpy(gpu_output, output_image.data, h * step, hipMemcpyHostToDevice);
sauvolaBinarization << < grid_size, block_size >> > (gpu_source, gpu_output, w, h, step);
hipDeviceSynchronize();
hipMemcpy(output_image.data, gpu_output, h * step, hipMemcpyDeviceToHost);
hipEventRecord(timer_stop, 0);
hipEventSynchronize(timer_stop);
hipEventElapsedTime(&time_difference, timer_start, timer_stop);
save_image(output_image, output_path.c_str());
cout << "Binarization time: " << time_difference << " ms" << endl;
hipFree(gpu_source);
hipFree(gpu_output);
return 0;
} | 6150ddb15ecaafe2d54a1a35af0b41ea521347cf.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
// Loads the image at the given path as a single-channel grayscale cv::Mat.
// The returned Mat is empty (data == nullptr) when the file cannot be read.
Mat load_image(const char *source_image) {
return imread(source_image, CV_LOAD_IMAGE_GRAYSCALE);
}
// Writes `image` to `path` as a maximum-quality JPEG.
// Returns 0 on success, -1 (after printing a message) when OpenCV fails.
int save_image(Mat &image, const char *path) {
vector<int> jpeg_params;
jpeg_params.push_back(CV_IMWRITE_JPEG_QUALITY);
jpeg_params.push_back(100);
if (imwrite(path, image, jpeg_params)) {
return 0;
}
cout << "Could not save image" << endl;
return -1;
}
// Validates the command line: the program needs a block size and an image name.
// Prints usage and terminates the process when they are missing.
// (The original `return -1;` from a void function is ill-formed C++; the
// intent -- abort the run -- is expressed with exit instead.)
void check_arguments(int argc) {
if (argc < 3) {
std::cout << "Run program with following format: ";
std::cout << "./binarization_gpu <block_size> <image_name>" << std::endl;
std::exit(-1);
}
}
// Sauvola local-threshold binarization.
// Each thread handles one byte of the pitched grayscale image: it computes the
// mean m and standard deviation s over an r-neighbourhood window, derives the
// Sauvola threshold t = m * (1 + k*(s/S - 1)) and writes 0 or 255.
// Pixels whose window would leave the image fall back to a fixed threshold 128.
// The linear thread index is interpreted against the pitched layout:
// row = index / step, column = index % step, where `step` is the cv::Mat row
// pitch (>= width); the padding columns are processed too, which is harmless.
__global__ void sauvolaBinarization(unsigned char *source_image, unsigned char *output_image, int width, int height, int step) {
int x, y;
int r = 13;                        // window radius
float k = 0.12;                    // Sauvola sensitivity parameter
float m, sum, variance, varianceSum, treshold;
int MAX_STANDARD_DEVIATION = 128;  // dynamic range S of the standard deviation
int n = (2 * r + 1) * (2 * r + 1); // pixels per window
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Defensive bound: never read/write past the height*step buffer even if the
// launch configuration over-covers the image (the original had no guard).
if (index >= height * step)
return;
int indexColumn = index % step;
int delimiter = step * height - r * step;
int indexRow = index / step;
if (indexColumn > r && indexColumn < step - r && index > r * step && index < delimiter) {
// Pass 1: window mean.
sum = 0;
varianceSum = 0;
for (y = indexRow - r; y <= indexRow + r; y++) {
for (x = indexColumn - r; x <= indexColumn + r; x++) {
sum += source_image[y * step + x];
}
}
m = sum / n;
// Pass 2: window variance.
for (y = indexRow - r; y <= indexRow + r; y++) {
for (x = indexColumn - r; x <= indexColumn + r; x++) {
varianceSum += (source_image[y * step + x] - m) * (source_image[y * step + x] - m);
}
}
variance = sqrt(varianceSum / n);
treshold = (m * (1.0 + k * (variance / MAX_STANDARD_DEVIATION - 1.0)));
output_image[index] = (source_image[index] > treshold) ? 255 : 0;
} else {
// Border pixels: plain global threshold.
output_image[index] = (source_image[index] > 128) ? 255 : 0;
}
}
// Entry point: loads a grayscale image, runs Sauvola binarization on the GPU,
// reports the elapsed GPU time and stores the result.
// Fixes over the original: kernel launch errors are surfaced, and the two
// cudaEvent_t handles are destroyed instead of leaked.
int main(int argc, char **argv) {
check_arguments(argc);
int w, h, step;
float time_difference;
string infname = "./original_images/" + string(argv[2]) + ".pgm";
string output_path = "./binarized_images/" + string(argv[2]) + ".pgm";
Mat source_image = load_image(infname.c_str());
Mat output_image = source_image.clone();
if (!source_image.data) {
cout << "Could not open image" << endl;
return -1;
}
w = source_image.cols;
h = source_image.rows;
step = source_image.step;  // row pitch in bytes (>= w)
int block_size = atoi(argv[1]);
int image_size = w * h;
// NOTE(review): image_size / block_size is integer division, so ceil() is a
// no-op and the image tail (plus the h*(step-w) padding bytes) is left with
// the values copied from the input. Kept as-is because the kernel performs
// no index bound check of its own, so widening the grid here alone would
// risk out-of-bounds accesses.
int grid_size = ceil(image_size / block_size);
unsigned char *gpu_source = nullptr;
unsigned char *gpu_output = nullptr;
cudaEvent_t timer_start, timer_stop;
cudaEventCreate(&timer_start);
cudaEventCreate(&timer_stop);
cudaEventRecord(timer_start, 0);
cudaMalloc<unsigned char>(&gpu_source, h * step);
cudaMalloc<unsigned char>(&gpu_output, h * step);
cudaMemcpy(gpu_source, source_image.data, h * step, cudaMemcpyHostToDevice);
// Pre-fill the output with the input so unprocessed bytes survive the round trip.
cudaMemcpy(gpu_output, output_image.data, h * step, cudaMemcpyHostToDevice);
sauvolaBinarization << < grid_size, block_size >> > (gpu_source, gpu_output, w, h, step);
// Surface launch-configuration errors (e.g. block_size > 1024) instead of
// silently saving an unmodified image.
cudaError_t launch_err = cudaGetLastError();
if (launch_err != cudaSuccess) {
cout << "Kernel launch failed: " << cudaGetErrorString(launch_err) << endl;
}
cudaDeviceSynchronize();
cudaMemcpy(output_image.data, gpu_output, h * step, cudaMemcpyDeviceToHost);
cudaEventRecord(timer_stop, 0);
cudaEventSynchronize(timer_stop);
cudaEventElapsedTime(&time_difference, timer_start, timer_stop);
save_image(output_image, output_path.c_str());
cout << "Binarization time: " << time_difference << " ms" << endl;
// Release timing events (leaked in the original) and device buffers.
cudaEventDestroy(timer_start);
cudaEventDestroy(timer_stop);
cudaFree(gpu_source);
cudaFree(gpu_output);
return 0;
}
e8a4a829a838ca0521fc795956253dd0430ea66b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// Grid-stride loop over n items: valid for any launch configuration and for
// n larger than one grid's worth of threads.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Threads per block used by every kernel launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Ceil-divide: smallest number of CUDA_NUM_THREADS-sized blocks covering N items.
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// Bilinearly samples the value at fractional coordinate (h, w) from a 2D map
// stored row-major with row pitch data_width. Corner taps that fall outside
// [0, height) x [0, width) contribute 0 (zero-padding border behaviour).
__device__ float dmcn_im2col_bilinear_cuda(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
// Integer corners surrounding the sample point.
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
// Fractional distances from the low corner.
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
// Fetch the four neighbours, substituting 0 for out-of-range taps.
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
// Standard bilinear blend of the four corner values.
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// Returns the bilinear-interpolation weight that integer pixel (h, w) carries
// for a sample taken at fractional position (argmax_h, argmax_w); a pixel that
// is not one of the four corners of the sample gets weight 0. Used to scatter
// gradients back onto the input image in the backward pass.
__device__ float dmcn_get_gradient_weight_cuda(float argmax_h, float argmax_w,
const int h, const int w, const int height, const int width)
{
// Sample positions fully outside the map contribute nothing.
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
// The weight of each corner is the product of its 1D bilinear factors.
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
// Derivative of the bilinearly sampled value at (argmax_h, argmax_w) with
// respect to the sampling coordinate itself: bp_dir == 0 differentiates along
// h, bp_dir == 1 along w. Used to backpropagate into the learned offsets.
// Out-of-range corner taps are skipped (they contributed 0 to the sample).
__device__ float dmcn_get_coordinate_weight_cuda(float argmax_h, float argmax_w,
const int height, const int width, const float *im_data,
const int data_width, const int bp_dir)
{
// Sample positions fully outside the map contribute nothing.
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (bp_dir == 0)
{
// d(sample)/d(argmax_h): +/- the w-direction bilinear factor per corner.
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
// d(sample)/d(argmax_w): +/- the h-direction bilinear factor per corner.
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
// Builds the column buffer for modulated deformable convolution (DCNv2).
// One thread per (batch, input channel, output y, output x): each thread
// samples its kernel_h x kernel_w taps at positions displaced by the learned
// per-group offsets, scales each bilinear sample by the modulation mask, and
// writes them into data_col laid out as (N, c*kh*kw, height_col*width_col).
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
// launch channels * batch_size * height_col * width_col cores
CUDA_KERNEL_LOOP(index, n)
{
// NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
// here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
// index index of output matrix
// Decompose the linear index into (batch, channel, output y, output x).
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
// const int b_col = (index / width_col / height_col) % batch_size;
const int b_col = (index / width_col / height_col / num_channels) % batch_size;
// const int c_im = (index / width_col / height_col) / batch_size;
const int c_im = (index / width_col / height_col) % num_channels;
// const int c_col = c_im * kernel_h * kernel_w;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// Top-left corner of the receptive field in input coordinates.
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
// Offsets hold (dy, dx) pairs and the mask one scalar per tap, both indexed
// by (deformable group, tap, output position).
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
// Deformed sampling position = regular grid tap + learned offset.
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear_cuda(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear_cuda(data_im_ptr, width, height, width, h_im, w_im);
}
// Modulated sample; advance to the next tap's row in the column buffer.
*data_col_ptr = val * mask;
// data_col_ptr += batch_size * height_col * width_col;
data_col_ptr += height_col * width_col;
}
}
}
}
// Backward of im2col w.r.t. the input image: each of the
// n = channels * kernel_h * kernel_w * batch_size * height_col * width_col
// threads takes one column-buffer gradient element and scatters it into
// grad_im through the bilinear-interpolation weights of its deformed
// sampling point. atomicAdd is required because the bilinear footprints of
// different taps overlap in grad_im.
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const float *data_col, const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
float *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
// Decode kernel tap (i, j) and input channel c from the flattened index.
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
// Per-(batch, deformable group) slices of the offset / mask tensors.
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
// Fractional input coordinate this tap sampled in the forward pass.
const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
const float cur_inv_w_data = w_in + j * dilation_w + offset_w;
// Incoming gradient, modulated by the same mask used in the forward pass.
const float cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
// Scan a conservative 5x5 window around the truncated sampling point; the
// |.| < 1 tests restrict actual contributions to the 2x2 bilinear support.
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
float weight = dmcn_get_gradient_weight_cuda(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
// Backward of im2col w.r.t. the sampling offsets and the modulation mask.
// n = batch_size * height_col * width_col * 2*kernel_h*kernel_w * deformable_group:
// each thread owns one offset channel (the h- or w-component of one kernel
// tap, selected by bp_dir below) at one output location, and accumulates the
// gradient over every input channel of its deformable group.
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const float *data_col, const float *data_im,
const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
float *grad_offset, float *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
// val accumulates d(loss)/d(offset); mval accumulates d(loss)/d(mask).
float val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
// NOTE: channel_per_deformable_group here counts column channels
// (c_im * kh * kw); the / kernel_h / kernel_w below converts it back to
// image channels when slicing data_im.
const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
// Offset channel within this deformable group; even = h, odd = w component.
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
// Visit every column channel of this group that used this kernel tap.
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float inv_h = h_in + i * dilation_h + offset_h;
float inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
// Sample fell outside the image: flag with -2 so the coordinate weight
// below evaluates to 0 (and skip the mask-gradient contribution).
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear_cuda(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const float weight = dmcn_get_coordinate_weight_cuda(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
// Only the even (h) channel of each (h, w) pair stores grad_mask, so each
// mask element is written exactly once.
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
// Host-side launcher for modulated_deformable_im2col_gpu_kernel: expands
// data_im into the column buffer data_col on `stream`, one GPU thread per
// (channel, batch, output-pixel) element.
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_im2col_cuda(hipStream_t stream,
    const float* data_im, const float* data_offset, const float* data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group, float* data_col) {
  // One thread per element of the (channels, batch, oh, ow) output volume.
  const int total_threads = channels * batch_size * height_col * width_col;
  const int chan_per_group = channels / deformable_group;
  hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel)
      , dim3(GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS),
      0, stream,
      total_threads, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, chan_per_group,
      batch_size, channels, deformable_group, height_col, width_col, data_col);
  const hipError_t err = hipGetLastError();
  if (err != hipSuccess)
    printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
// Host-side launcher for modulated_deformable_col2im_gpu_kernel: scatters
// the column-buffer gradient data_col back into the input-image gradient
// grad_im on `stream`, one GPU thread per column-buffer element.
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_col2im_cuda(hipStream_t stream,
const float* data_col, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* grad_im){
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel)
, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
0, stream,
num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
// BUG FIX: pad_h was passed twice (pad_h, pad_h), which produced wrong
// horizontal padding -- and wrong gradients -- whenever pad_h != pad_w.
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
// Host-side launcher for modulated_deformable_col2im_coord_gpu_kernel:
// computes grad_offset and grad_mask from the column-buffer gradient on
// `stream`, one GPU thread per offset-channel element
// (batch * oh * ow * 2*kh*kw * deformable_group).
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_col2im_coord_cuda(hipStream_t stream,
const float* data_col, const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
float* grad_offset, float* grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
// Column channels (c_im * kh * kw) handled by each deformable group.
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel)
, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
0, stream,
num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset, grad_mask);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
} | e8a4a829a838ca0521fc795956253dd0430ea66b.cu | #include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// Grid-stride loop: iterates the thread-global index i over [0, n)
// regardless of how many blocks were actually launched.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Threads per block used for every kernel launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Number of blocks needed to cover N work items (ceiling division).
inline int GET_BLOCKS(const int N)
{
  const int rounded_up = N + CUDA_NUM_THREADS - 1;
  return rounded_up / CUDA_NUM_THREADS;
}
// Bilinearly sample bottom_data (row stride data_width) at the fractional
// coordinate (h, w). Corner taps falling outside [0, height) x [0, width)
// contribute 0, so samples near the border degrade gracefully.
__device__ float dmcn_im2col_bilinear_cuda(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
    const int y0 = floor(h);
    const int x0 = floor(w);
    const int y1 = y0 + 1;
    const int x1 = x0 + 1;

    // Fractional offsets inside the cell and their complements.
    const float ly = h - y0;
    const float lx = w - x0;
    const float hy = 1 - ly;
    const float hx = 1 - lx;

    // Fetch the four corners, substituting 0 for out-of-range taps.
    const float v00 = (y0 >= 0 && x0 >= 0) ? bottom_data[y0 * data_width + x0] : 0;
    const float v01 = (y0 >= 0 && x1 <= width - 1) ? bottom_data[y0 * data_width + x1] : 0;
    const float v10 = (y1 <= height - 1 && x0 >= 0) ? bottom_data[y1 * data_width + x0] : 0;
    const float v11 = (y1 <= height - 1 && x1 <= width - 1) ? bottom_data[y1 * data_width + x1] : 0;

    return hy * hx * v00 + hy * lx * v01 + ly * hx * v10 + ly * lx * v11;
}
// Bilinear weight that integer pixel (h, w) received when the forward pass
// sampled at (argmax_h, argmax_w). Returns 0 when the sampling point is
// outside (-1, height) x (-1, width) or (h, w) is not one of its four taps.
__device__ float dmcn_get_gradient_weight_cuda(float argmax_h, float argmax_w,
const int h, const int w, const int height, const int width)
{
    if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
        return 0;

    const int h_low = floor(argmax_h);
    const int w_low = floor(argmax_w);

    float weight = 0;
    const bool on_h = (h == h_low) || (h == h_low + 1);
    const bool on_w = (w == w_low) || (w == w_low + 1);
    if (on_h && on_w)
    {
        // Per-axis interpolation weight of the requested tap.
        const float wh = (h == h_low) ? (h + 1 - argmax_h) : (argmax_h + 1 - h);
        const float ww = (w == w_low) ? (w + 1 - argmax_w) : (argmax_w + 1 - w);
        weight = wh * ww;
    }
    return weight;
}
// Derivative of the bilinear sample at (argmax_h, argmax_w) with respect to
// the sampling coordinate itself: bp_dir == 0 differentiates along h,
// bp_dir == 1 along w. Returns 0 when the sampling point lies outside
// (-1, height) x (-1, width), matching the forward sampler's support.
__device__ float dmcn_get_coordinate_weight_cuda(float argmax_h, float argmax_w,
const int height, const int width, const float *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (bp_dir == 0)
{
// d/dh: low-row corners enter negatively, high-row corners positively,
// each scaled by its w-direction interpolation weight.
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
// d/dw: low-column corners enter negatively, high-column corners
// positively, each scaled by its h-direction interpolation weight.
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
// Forward im2col for modulated deformable convolution (DCNv2).
// n = num_channels * batch_size * height_col * width_col; each thread fills
// the kernel_h x kernel_w column entries for one (batch, channel, output
// pixel): it reads the learned per-tap offsets, samples data_im bilinearly
// at the deformed location, scales by the modulation mask, and writes the
// result into data_col.
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
// launch channels * batch_size * height_col * width_col cores
CUDA_KERNEL_LOOP(index, n)
{
// NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
// here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
// const int b_col = (index / width_col / height_col) % batch_size;
const int b_col = (index / width_col / height_col / num_channels) % batch_size;
// const int c_im = (index / width_col / height_col) / batch_size;
const int c_im = (index / width_col / height_col) % num_channels;
// const int c_col = c_im * kernel_h * kernel_w;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// Top-left corner of this output pixel's receptive field (may be negative
// because of padding).
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
// Deformed (fractional) sampling coordinate for this kernel tap.
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear_cuda(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear_cuda(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
// data_col_ptr += batch_size * height_col * width_col;
data_col_ptr += height_col * width_col;
}
}
}
}
// Backward of im2col w.r.t. the input image: each of the
// n = channels * kernel_h * kernel_w * batch_size * height_col * width_col
// threads takes one column-buffer gradient element and scatters it into
// grad_im through the bilinear-interpolation weights of its deformed
// sampling point. atomicAdd is required because the bilinear footprints of
// different taps overlap in grad_im.
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const float *data_col, const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
float *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
// Decode kernel tap (i, j) and input channel c from the flattened index.
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
// Per-(batch, deformable group) slices of the offset / mask tensors.
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
// Fractional input coordinate this tap sampled in the forward pass.
const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
const float cur_inv_w_data = w_in + j * dilation_w + offset_w;
// Incoming gradient, modulated by the same mask used in the forward pass.
const float cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
// Scan a conservative 5x5 window around the truncated sampling point; the
// |.| < 1 tests restrict actual contributions to the 2x2 bilinear support.
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
float weight = dmcn_get_gradient_weight_cuda(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
// Backward of im2col w.r.t. the sampling offsets and the modulation mask.
// n = batch_size * height_col * width_col * 2*kernel_h*kernel_w * deformable_group:
// each thread owns one offset channel (the h- or w-component of one kernel
// tap, selected by bp_dir below) at one output location, and accumulates the
// gradient over every input channel of its deformable group.
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const float *data_col, const float *data_im,
const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
float *grad_offset, float *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
// val accumulates d(loss)/d(offset); mval accumulates d(loss)/d(mask).
float val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
// NOTE: channel_per_deformable_group here counts column channels
// (c_im * kh * kw); the / kernel_h / kernel_w below converts it back to
// image channels when slicing data_im.
const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
// Offset channel within this deformable group; even = h, odd = w component.
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
// Visit every column channel of this group that used this kernel tap.
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float inv_h = h_in + i * dilation_h + offset_h;
float inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
// Sample fell outside the image: flag with -2 so the coordinate weight
// below evaluates to 0 (and skip the mask-gradient contribution).
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear_cuda(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const float weight = dmcn_get_coordinate_weight_cuda(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
// Only the even (h) channel of each (h, w) pair stores grad_mask, so each
// mask element is written exactly once.
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
// Host-side launcher for modulated_deformable_im2col_gpu_kernel: expands
// data_im into the column buffer data_col on `stream`, one GPU thread per
// (channel, batch, output-pixel) element.
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_im2col_cuda(cudaStream_t stream,
    const float* data_im, const float* data_offset, const float* data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group, float* data_col) {
  // One thread per element of the (channels, batch, oh, ow) output volume.
  const int total_threads = channels * batch_size * height_col * width_col;
  const int chan_per_group = channels / deformable_group;
  modulated_deformable_im2col_gpu_kernel
      <<<GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
          total_threads, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w,
          pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, chan_per_group,
          batch_size, channels, deformable_group, height_col, width_col, data_col);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
// Host-side launcher for modulated_deformable_col2im_gpu_kernel: scatters
// the column-buffer gradient data_col back into the input-image gradient
// grad_im on `stream`, one GPU thread per column-buffer element.
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_col2im_cuda(cudaStream_t stream,
const float* data_col, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* grad_im){
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
modulated_deformable_col2im_gpu_kernel
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
// BUG FIX: pad_h was passed twice (pad_h, pad_h), which produced wrong
// horizontal padding -- and wrong gradients -- whenever pad_h != pad_w.
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
// Host-side launcher for modulated_deformable_col2im_coord_gpu_kernel:
// computes grad_offset and grad_mask from the column-buffer gradient on
// `stream`, one GPU thread per offset-channel element
// (batch * oh * ow * 2*kh*kw * deformable_group).
// Launch errors are reported via printf but not propagated to the caller.
void modulated_deformable_col2im_coord_cuda(cudaStream_t stream,
    const float* data_col, const float* data_im, const float* data_offset, const float* data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group,
    float* grad_offset, float* grad_mask) {
  // Offset tensor channel count: an (h, w) pair per tap per group.
  const int offset_channels = 2 * kernel_h * kernel_w * deformable_group;
  const int total_threads = batch_size * height_col * width_col * offset_channels;
  // Column channels (c_im * kh * kw) handled by each deformable group.
  const int chan_per_group = channels * kernel_h * kernel_w / deformable_group;
  modulated_deformable_col2im_coord_gpu_kernel
      <<<GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
          total_threads, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
          kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
          dilation_h, dilation_w, chan_per_group,
          batch_size, offset_channels, deformable_group, height_col, width_col,
          grad_offset, grad_mask);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
f73c16e11f9f0669eacec75a3cfdb5321baa6d02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <sys/time.h>
using namespace std;
// Wrap a HIP runtime call so failures are reported with the call site's line.
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
// Thread-block edge length; matrix sizes are restricted to multiples of it.
#define BLOCK_SIZE 32
// 64-bit unsigned index type so N*N element counts cannot overflow 32 bits.
typedef unsigned long long int LONG;
// Abort the process with a diagnostic when a HIP runtime call fails.
// `line` is the caller's source line, supplied by the CUDA_SAFE_CALL macro.
void safe_call(hipError_t ret, int line)
{
    if (ret == hipSuccess)
        return;
    cout << "Error at line " << line << " : " << hipGetErrorString(ret) << endl;
    exit(-1);
}
// Dense matrix multiply C = A * B for row-major N x N matrices of doubles.
// Launch with a 2-D grid/block whose product covers at least N x N threads;
// each thread computes one element of C.
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
	LONG row = blockIdx.y*blockDim.y + threadIdx.y;
	LONG col = blockIdx.x*blockDim.x + threadIdx.x;
	// Bounds guard: the current launch (N = K*BLOCK_SIZE) covers the matrix
	// exactly, but this keeps the kernel safe for grids that overshoot N.
	if (row >= N || col >= N)
		return;
	double sum = 0.0;
	for (LONG n = 0; n < N; ++n)
		sum += A[row*N+n]*B[n*N+col];
	C[row*N+col] = sum;
}
// Reads K from stdin, multiplies two N x N (N = K*BLOCK_SIZE) matrices of
// doubles on the GPU using unified (managed) memory, and prints the achieved
// GFLOP/s. The #if 0 sections hold the original host-buffer workflow and a
// CPU verification pass, kept for reference.
int main(int argc, char *argv[])
{
struct timeval t1,t2, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(hipSetDevice(0));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
#if 0
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = 2.f*(j+i);
hB[j*N+i] = 1.f*(j-i);
}
}
#endif
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
double *dA,*dB,*dC;
CUDA_SAFE_CALL(hipMallocManaged(&dA,size));
CUDA_SAFE_CALL(hipMallocManaged(&dB,size));
CUDA_SAFE_CALL(hipMallocManaged(&dC,size));
cout << "Memory allocated on device memory." << endl;
// Initialize matrices
// (managed memory is written directly from the host; no explicit copies)
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
dA[j*N+i] = 2.f*(j+i);
dB[j*N+i] = 1.f*(j-i);
}
}
dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(K,K);
// Time the kernel plus any managed-memory migration it triggers.
gettimeofday(&t1,0);
// Copy matrices from the host to device
//CUDA_SAFE_CALL(hipMemcpy(dA,hA,size,hipMemcpyHostToDevice));
//CUDA_SAFE_CALL(hipMemcpy(dB,hB,size,hipMemcpyHostToDevice));
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock), 0, 0, dA,dB,dC,N);
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
// 2*N^3 floating-point operations (N^3 multiplies + N^3 adds).
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Managed : " << gflops << endl;
#if 0
// Now do the matrix multiplication on the CPU
double sum;
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
sum = 0.f;
for (LONG n=0; n<N; n++){
sum += hA[row*N+n]*hB[n*N+col];
}
hC[row*N+col] = sum;
}
}
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpy(C,dC,size,hipMemcpyDeviceToHost));
// Check the result and make sure it is correct
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
if ( C[row*N+col] != hC[row*N+col] ){
cout << "Wrong answer!" << endl;
row = col = N;
}
}
}
#endif
cout << "Finished." << endl;
CUDA_SAFE_CALL(hipFree(dA));
CUDA_SAFE_CALL(hipFree(dB));
CUDA_SAFE_CALL(hipFree(dC));
return 0;
}
| f73c16e11f9f0669eacec75a3cfdb5321baa6d02.cu | #include <iostream>
#include <sys/time.h>
using namespace std;
// Wrap a CUDA runtime call so failures are reported with the call site's line.
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
// Thread-block edge length; matrix sizes are restricted to multiples of it.
#define BLOCK_SIZE 32
// 64-bit unsigned index type so N*N element counts cannot overflow 32 bits.
typedef unsigned long long int LONG;
// Abort the process with a diagnostic when a CUDA runtime call fails.
// `line` is the caller's source line, supplied by the CUDA_SAFE_CALL macro.
void safe_call(cudaError_t ret, int line)
{
    if (ret == cudaSuccess)
        return;
    cout << "Error at line " << line << " : " << cudaGetErrorString(ret) << endl;
    exit(-1);
}
// Dense matrix multiply C = A * B for row-major N x N matrices of doubles.
// Launch with a 2-D grid/block whose product covers at least N x N threads;
// each thread computes one element of C.
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
	LONG row = blockIdx.y*blockDim.y + threadIdx.y;
	LONG col = blockIdx.x*blockDim.x + threadIdx.x;
	// Bounds guard: the current launch (N = K*BLOCK_SIZE) covers the matrix
	// exactly, but this keeps the kernel safe for grids that overshoot N.
	if (row >= N || col >= N)
		return;
	double sum = 0.0;
	for (LONG n = 0; n < N; ++n)
		sum += A[row*N+n]*B[n*N+col];
	C[row*N+col] = sum;
}
// Benchmark driver: reads K from stdin, forms N = K*BLOCK_SIZE, multiplies two
// NxN double matrices on the GPU via CUDA managed memory, and reports the
// achieved GFLOP/s. CUDA_SAFE_CALL aborts the process on any CUDA API error.
int main(int argc, char *argv[])
{
struct timeval t1,t2, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(cudaSetDevice(0));
// fix: corrected typo in progress message (was "Multiplcation")
cout << "Executing Matrix Multiplication" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
#if 0
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = 2.f*(j+i);
hB[j*N+i] = 1.f*(j-i);
}
}
#endif
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
double *dA,*dB,*dC;
CUDA_SAFE_CALL(cudaMallocManaged(&dA,size));
CUDA_SAFE_CALL(cudaMallocManaged(&dB,size));
CUDA_SAFE_CALL(cudaMallocManaged(&dC,size));
cout << "Memory allocated on device memory." << endl;
// Initialize matrices (host writes to managed buffers)
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
dA[j*N+i] = 2.f*(j+i);
dB[j*N+i] = 1.f*(j-i);
}
}
dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(K,K);
// NOTE(review): this wall-clock window includes managed-memory page
// migration triggered by the GPU's first touch, not just kernel time
gettimeofday(&t1,0);
// Copy matrices from the host to device
//CUDA_SAFE_CALL(cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice));
//CUDA_SAFE_CALL(cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice));
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
// dense matmul performs 2*N^3 floating-point operations
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Managed : " << gflops << endl;
#if 0
// Now do the matrix multiplication on the CPU
double sum;
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
sum = 0.f;
for (LONG n=0; n<N; n++){
sum += hA[row*N+n]*hB[n*N+col];
}
hC[row*N+col] = sum;
}
}
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpy(C,dC,size,cudaMemcpyDeviceToHost));
// Check the result and make sure it is correct
for (LONG row=0; row<N; row++){
for (LONG col=0; col<N; col++){
if ( C[row*N+col] != hC[row*N+col] ){
cout << "Wrong answer!" << endl;
row = col = N;
}
}
}
#endif
cout << "Finished." << endl;
CUDA_SAFE_CALL(cudaFree(dA));
CUDA_SAFE_CALL(cudaFree(dB));
CUDA_SAFE_CALL(cudaFree(dC));
return 0;
}
|
34e50ba43d905c7984a78e23e4e95a9c2d982dec.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <cassert>
#include <math.h>
#include <hip/hip_runtime.h>
#include "cudacommon.h"
#include "ResultDatabase.h"
#include "OptionParser.h"
#define MIN_NODES 20
#define MAX_NODES ULONG_MAX
#define MIN_EDGES 2
#define MAX_INIT_EDGES 4 // Nodes will have, on average, 2*MAX_INIT_EDGES edges
#define MIN_WEIGHT 1
#define MAX_WEIGHT 10
#define SEED 7
#define MAX_THREADS_PER_BLOCK 256
using namespace std;
// CSR-style node record: each node stores the offset of its first edge in the
// flat edge array plus its outgoing-edge count.
struct Node
{
int starting;     // index of this node's first entry in the edge array
int no_of_edges;  // number of outgoing edges
};
void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges);
float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges);
#ifdef UNIFIED_MEMORY
float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges);
#endif
////////////////////////////////////////////////////////////////////////////////
// BFS frontier-expansion kernel (one thread per node).
// A thread whose node is in the current frontier (g_graph_mask) clears its
// mask bit and relaxes all outgoing edges: each unvisited neighbour gets
// cost = cost(tid)+1 and is flagged in g_updating_graph_mask for promotion
// by Kernel2.
// NOTE(review): tid is computed with MAX_THREADS_PER_BLOCK rather than
// blockDim.x, so the launch must use exactly that block size — confirm.
__global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
// walk this node's slice of the flat edge array
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
// several frontier nodes may write the same neighbour concurrently;
// presumably all write the same level value — benign race, verify.
g_cost[id]=g_cost[tid]+1;
g_updating_graph_mask[id]=true;
}
}
}
}
// BFS frontier-promotion kernel: nodes flagged by Kernel become the new
// frontier and are marked visited; *g_over is set so the host loop runs
// another level.
__global__ void Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes)
{
int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x;
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
// any promoted node keeps the host-side do/while loop running
*g_over=true;
g_updating_graph_mask[tid]=false;
}
}
////////////////////////////////////////////////////////////////////////////////
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op) {
// BFS defines no benchmark-specific command-line options.
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the radix sort benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing, results are stored in resultDB
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
// Benchmark entry point: builds (or reads) the graph once, then runs
// `passes` timed BFS passes, recording results in resultDB.
// BFSGraph/BFSGraphUnifiedMemory return FLT_MAX as an error sentinel.
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running BFS\n");
int device;
hipGetDevice(&device);
// device properties are queried but not otherwise used in this function
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
// seed random number generator
srand(SEED);
int no_of_nodes = 0;
int edge_list_size = 0;
int source = 0;
Node* h_graph_nodes;
int* h_graph_edges;
initGraph(op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
// atts string for result database
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
bool quiet = op.getOptionBool("quiet");
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
if(!quiet) {
printf("Pass %d:\n", i);
}
float time = BFSGraph(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
if(!quiet) {
if(time == FLT_MAX) {
printf("Executing BFS...Error.\n");
} else {
printf("Executing BFS...Done.\n");
}
}
#ifdef UNIFIED_MEMORY
float timeUM = BFSGraphUnifiedMemory(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
if(!quiet) {
if(timeUM == FLT_MAX) {
printf("Executing BFS using unified memory...Error.\n");
} else {
printf("Executing BFS using unified memory...Done.\n");
}
}
// speedup of the unified-memory path relative to the explicit-copy path
if(time != FLT_MAX && timeUM != FLT_MAX) {
resultDB.AddResult("bfs_unifiedmem_speedup", atts, "N", time/timeUM);
}
#endif
}
free( h_graph_nodes);
free( h_graph_edges);
}
////////////////////////////////////////////////////////////////////////////////
//Generate uniform distribution
////////////////////////////////////////////////////////////////////////////////
// Draw one integer from the closed interval [rangeLow, rangeHigh] using a
// single rand() call mapped onto [0, 1).
int uniform_distribution(int rangeLow, int rangeHigh) {
    const int span = rangeHigh - rangeLow + 1;
    const double unit = rand()/(1.0 + RAND_MAX);  // uniform in [0, 1)
    return (unit * span) + rangeLow;              // implicit truncation to int
}
////////////////////////////////////////////////////////////////////////////////
//Initialize Graph
////////////////////////////////////////////////////////////////////////////////
// Populates no_of_nodes, edge_list_size, source, and the malloc'd
// h_graph_nodes / h_graph_edges arrays, either from the "inputFile" option
// (text format: #nodes; start+degree per node; source; #edges; id+weight per
// edge) or from a random generator sized by the "size" option.
void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges) {
bool quiet = op.getOptionBool("quiet");
// open input file for reading
FILE *fp = NULL;
string infile = op.getOptionString("inputFile");
if(infile != "") {
fp = fopen(infile.c_str(),"r");
if(!fp && !quiet)
{
printf("Error: Unable to read graph file %s.\n", infile.c_str());
}
}
if(!quiet) {
if(fp) {
printf("Reading graph file\n");
} else {
printf("Generating graph with problem size %d\n", (int)op.getOptionInt("size"));
}
}
// initialize number of nodes
if(fp) {
int n = fscanf(fp,"%d",&no_of_nodes);
assert(n == 1);
} else {
int problemSizes[4] = {10, 50, 200, 400};
no_of_nodes = problemSizes[op.getOptionInt("size") - 1] * 1024 * 1024;
}
// initalize the nodes & number of edges
h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
int start;
int edgeno;
for( int i = 0; i < no_of_nodes; i++)
{
if(fp) {
int n = fscanf(fp,"%d %d",&start,&edgeno);
assert(n == 2);
} else {
start = edge_list_size;
edgeno = rand() % (MAX_INIT_EDGES - MIN_EDGES + 1) + MIN_EDGES;
}
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
edge_list_size += edgeno;
}
// initialize the source node
if(fp) {
int n = fscanf(fp,"%d",&source);
assert(n == 1);
} else {
source = uniform_distribution(0, no_of_nodes - 1);
}
// NOTE(review): this unconditionally overrides the source read/drawn above,
// so BFS always starts at node 0.
source = 0;
if(fp) {
int edges;
int n = fscanf(fp,"%d",&edges);
assert(n == 1);
assert(edges == edge_list_size);
}
// initialize the edges
int id;
int cost;
h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
if(fp) {
// the edge weight is parsed but discarded — BFS here is unweighted
int n = fscanf(fp,"%d %d",&id, &cost);
assert(n == 2);
} else {
id = uniform_distribution(0, no_of_nodes - 1);
//cost = rand() % (MAX_WEIGHT - MIN_WEIGHT + 1) + MIN_WEIGHT;
}
h_graph_edges[i] = id;
}
if(!quiet) {
if(fp) {
fclose(fp);
printf("Done reading graph file\n");
} else {
printf("Done generating graph\n");
}
printf("Graph size: %d nodes, %d edges\n", no_of_nodes, edge_list_size);
}
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
// Level-synchronous BFS from `source` on the GPU using explicit device
// allocations and host<->device copies. Records timing results in resultDB
// and optionally writes per-node costs to the "outputFile" option.
// Returns total (transfer + kernel) seconds, or FLT_MAX if any device
// allocation failed.
// fix: the hipEvent_t timing events were never destroyed, leaking driver
// resources on every pass; they are now destroyed before returning.
float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges)
{
bool verbose = op.getOptionBool("verbose");
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
// initalize the memory
for( int i = 0; i < no_of_nodes; i++)
{
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
// allocate mem for the result on host side; -1 marks "unreached"
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++) {
h_cost[i]=-1;
}
h_cost[source]=0;
// node list
Node* d_graph_nodes;
// edge list
int* d_graph_edges;
// mask
bool* d_graph_mask;
bool* d_updating_graph_mask;
// visited nodes
bool* d_graph_visited;
// result
int* d_cost;
// bool if execution is over
bool *d_over;
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_over, sizeof(bool)));
hipError_t err = hipGetLastError();
if(err != hipSuccess) {
// allocation failed: release everything and signal error to the caller
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
hipFree(d_over);
return FLT_MAX;
}
hipEvent_t tstart, tstop;
hipEventCreate(&tstart);
hipEventCreate(&tstop);
float elapsedTime;
double transferTime = 0.;
hipEventRecord(tstart, 0);
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
transferTime += elapsedTime * 1.e-3; // convert to seconds
// setup execution parameters: one thread per node; the block size must be
// MAX_THREADS_PER_BLOCK because the kernels index with that constant
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
double kernelTime = 0;
int k=0;
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
hipEventRecord(tstart, 0);
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
// check if kernel execution generated an error
hipEventRecord(tstart, 0);
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR()
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
k++;
}
while(stop);
if(verbose) {
printf("Kernel Executed %d times\n",k);
}
// copy result from device to host
hipEventRecord(tstart, 0);
hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
transferTime += elapsedTime * 1.e-3; // convert to seconds
// fix: destroy timing events (previously leaked on every pass)
hipEventDestroy(tstart);
hipEventDestroy(tstop);
//Store the result into a file
string outfile = op.getOptionString("outputFile");
if(outfile != "") {
FILE *fpo = fopen(outfile.c_str(),"w");
for(int i=0;i<no_of_nodes;i++) {
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
}
fclose(fpo);
}
// cleanup memory
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
hipFree(d_over);
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
resultDB.AddResult("bfs_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("bfs_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("bfs_total_time", atts, "sec", transferTime + kernelTime);
resultDB.AddResult("bfs_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime);
resultDB.AddResult("bfs_rate_edges", atts, "Edges/s", edge_list_size/kernelTime);
resultDB.AddResult("bfs_rate_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
return transferTime + kernelTime;
}
#ifdef UNIFIED_MEMORY
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA and Unified Memory
////////////////////////////////////////////////////////////////////////////////
// Level-synchronous BFS from `source` using HIP managed (unified) memory
// with prefetch/advise hints instead of explicit copies. Records results in
// resultDB and returns total kernel seconds, or FLT_MAX on error.
// fix: the hipEvent_t timing events were never destroyed, leaking driver
// resources on every pass; they are now destroyed before cleanup.
float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) {
bool verbose = op.getOptionBool("verbose");
bool quiet = op.getOptionBool("quiet");
int device = op.getOptionInt("device");
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// copy graph nodes to unified memory
Node* graph_nodes;
CUDA_SAFE_CALL(hipMallocManaged(&graph_nodes, sizeof(Node)*no_of_nodes));
memcpy(graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes);
hipError_t x;
x = hipMemPrefetchAsync(graph_nodes, sizeof(Node)*no_of_nodes, device);
if(x != hipSuccess) {
printf("failed\n");
}
//hipMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, hipMemAdviseSetReadMostly, device);
//hipMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, hipMemAdviseSetPreferredLocation, device);
// copy graph edges to unified memory
int* graph_edges;
CUDA_SAFE_CALL(hipMallocManaged(&graph_edges, sizeof(int)*edge_list_size));
memcpy(graph_edges, h_graph_edges, sizeof(int)*edge_list_size);
x = hipMemPrefetchAsync(graph_edges, sizeof(int)*edge_list_size, device);
if(x != hipSuccess) {
printf("failed\n");
}
hipMemAdvise(graph_edges, sizeof(int)*edge_list_size, hipMemAdviseSetReadMostly, device);
hipMemAdvise(graph_edges, sizeof(int)*edge_list_size, hipMemAdviseSetPreferredLocation, device);
// allocate and initalize the memory
bool* graph_mask;
bool* updating_graph_mask;
bool* graph_visited;
CUDA_SAFE_CALL(hipMallocManaged(&graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL(hipMallocManaged(&updating_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL(hipMallocManaged(&graph_visited, sizeof(bool)*no_of_nodes));
x = hipMemPrefetchAsync(graph_mask, sizeof(bool)*no_of_nodes, device);
if(x != hipSuccess) {
printf("failed\n");
}
x = hipMemPrefetchAsync(updating_graph_mask, sizeof(bool)*no_of_nodes, device);
if(x != hipSuccess) {
printf("failed\n");
}
x = hipMemPrefetchAsync(graph_visited, sizeof(bool)*no_of_nodes, device);
if(x != hipSuccess) {
printf("failed\n");
}
hipMemAdvise(graph_mask, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device);
hipMemAdvise(updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device);
hipMemAdvise(graph_visited, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device);
// NOTE(review): err is captured here but only checked further below, after
// the managed arrays have already been written — confirm this is intended.
hipError_t err = hipGetLastError();
// host-side initialization of managed arrays (pages may migrate back here
// after the prefetch above)
for( int i = 0; i < no_of_nodes; i++)
{
graph_mask[i]=false;
updating_graph_mask[i]=false;
graph_visited[i]=false;
}
//set the source node as true in the mask
graph_mask[source]=true;
graph_visited[source]=true;
// allocate and initialize memory for result
int* cost;
CUDA_SAFE_CALL(hipMallocManaged(&cost, sizeof(int)*no_of_nodes));
if(err != hipSuccess) {
hipFree(graph_nodes);
hipFree(graph_edges);
hipFree(graph_mask);
hipFree(updating_graph_mask);
hipFree(graph_visited);
hipFree(cost);
return FLT_MAX;
}
for(int i=0;i<no_of_nodes;i++) {
cost[i]=-1;
}
cost[source]=0;
// bool if execution is over
bool* over;
CUDA_SAFE_CALL(hipMallocManaged(&over, sizeof(bool)));
// events for timing
hipEvent_t tstart, tstop;
hipEventCreate(&tstart);
hipEventCreate(&tstop);
float elapsedTime;
// setup execution parameters: one thread per node; the block size must be
// MAX_THREADS_PER_BLOCK because the kernels index with that constant
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
double kernelTime = 0;
int k=0;
bool stop;
//Call the Kernel until all the elements of Frontier are not false
do
{
stop = false;
*over = stop;
hipEventRecord(tstart, 0);
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, graph_nodes, graph_edges, graph_mask, updating_graph_mask, graph_visited, cost, no_of_nodes);
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
// check if kernel execution generated an error
hipEventRecord(tstart, 0);
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, graph_mask, updating_graph_mask, graph_visited, over, no_of_nodes);
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR()
stop = *over;
k++;
}
while(stop);
if(verbose && !quiet) {
printf("Kernel Time: %f\n", kernelTime);
printf("Kernel Executed %d times\n",k);
}
// fix: destroy timing events (previously leaked on every pass)
hipEventDestroy(tstart);
hipEventDestroy(tstop);
// cleanup memory
hipFree(graph_nodes);
hipFree(graph_edges);
hipFree(graph_mask);
hipFree(updating_graph_mask);
hipFree(graph_visited);
hipFree(cost);
hipFree(over);
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
resultDB.AddResult("bfs_unifiedmem_total_time", atts, "sec", kernelTime);
resultDB.AddResult("bfs_unifiedmem_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime);
resultDB.AddResult("bfs_unifiedmem_rate_edges", atts, "Edges/s", edge_list_size/kernelTime);
return kernelTime;
}
#endif
| 34e50ba43d905c7984a78e23e4e95a9c2d982dec.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <cassert>
#include <math.h>
#include <cuda.h>
#include "cudacommon.h"
#include "ResultDatabase.h"
#include "OptionParser.h"
#define MIN_NODES 20
#define MAX_NODES ULONG_MAX
#define MIN_EDGES 2
#define MAX_INIT_EDGES 4 // Nodes will have, on average, 2*MAX_INIT_EDGES edges
#define MIN_WEIGHT 1
#define MAX_WEIGHT 10
#define SEED 7
#define MAX_THREADS_PER_BLOCK 256
using namespace std;
// CSR-style node record: each node stores the offset of its first edge in the
// flat edge array plus its outgoing-edge count.
struct Node
{
int starting;     // index of this node's first entry in the edge array
int no_of_edges;  // number of outgoing edges
};
void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges);
float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges);
#ifdef UNIFIED_MEMORY
float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges);
#endif
////////////////////////////////////////////////////////////////////////////////
// BFS frontier-expansion kernel (one thread per node).
// A thread whose node is in the current frontier (g_graph_mask) clears its
// mask bit and relaxes all outgoing edges: each unvisited neighbour gets
// cost = cost(tid)+1 and is flagged in g_updating_graph_mask for promotion
// by Kernel2.
// NOTE(review): tid is computed with MAX_THREADS_PER_BLOCK rather than
// blockDim.x, so the launch must use exactly that block size — confirm.
__global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
// walk this node's slice of the flat edge array
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
// several frontier nodes may write the same neighbour concurrently;
// presumably all write the same level value — benign race, verify.
g_cost[id]=g_cost[tid]+1;
g_updating_graph_mask[id]=true;
}
}
}
}
// BFS frontier-promotion kernel: nodes flagged by Kernel become the new
// frontier and are marked visited; *g_over is set so the host loop runs
// another level.
__global__ void Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes)
{
int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x;
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
// any promoted node keeps the host-side do/while loop running
*g_over=true;
g_updating_graph_mask[tid]=false;
}
}
////////////////////////////////////////////////////////////////////////////////
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op) {
// BFS defines no benchmark-specific command-line options.
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the radix sort benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing, results are stored in resultDB
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ****************************************************************************
// Benchmark entry point: builds (or reads) the graph once, then runs
// `passes` timed BFS passes, recording results in resultDB.
// BFSGraph/BFSGraphUnifiedMemory return FLT_MAX as an error sentinel.
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running BFS\n");
int device;
cudaGetDevice(&device);
// device properties are queried but not otherwise used in this function
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
// seed random number generator
srand(SEED);
int no_of_nodes = 0;
int edge_list_size = 0;
int source = 0;
Node* h_graph_nodes;
int* h_graph_edges;
initGraph(op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
// atts string for result database
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
bool quiet = op.getOptionBool("quiet");
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
if(!quiet) {
printf("Pass %d:\n", i);
}
float time = BFSGraph(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
if(!quiet) {
if(time == FLT_MAX) {
printf("Executing BFS...Error.\n");
} else {
printf("Executing BFS...Done.\n");
}
}
#ifdef UNIFIED_MEMORY
float timeUM = BFSGraphUnifiedMemory(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges);
if(!quiet) {
if(timeUM == FLT_MAX) {
printf("Executing BFS using unified memory...Error.\n");
} else {
printf("Executing BFS using unified memory...Done.\n");
}
}
// speedup of the unified-memory path relative to the explicit-copy path
if(time != FLT_MAX && timeUM != FLT_MAX) {
resultDB.AddResult("bfs_unifiedmem_speedup", atts, "N", time/timeUM);
}
#endif
}
free( h_graph_nodes);
free( h_graph_edges);
}
////////////////////////////////////////////////////////////////////////////////
//Generate uniform distribution
////////////////////////////////////////////////////////////////////////////////
// Draw one integer from the closed interval [rangeLow, rangeHigh] using a
// single rand() call mapped onto [0, 1).
int uniform_distribution(int rangeLow, int rangeHigh) {
    const int span = rangeHigh - rangeLow + 1;
    const double unit = rand()/(1.0 + RAND_MAX);  // uniform in [0, 1)
    return (unit * span) + rangeLow;              // implicit truncation to int
}
////////////////////////////////////////////////////////////////////////////////
//Initialize Graph
////////////////////////////////////////////////////////////////////////////////
// Populates no_of_nodes, edge_list_size, source, and the malloc'd
// h_graph_nodes / h_graph_edges arrays, either from the "inputFile" option
// (text format: #nodes; start+degree per node; source; #edges; id+weight per
// edge) or from a random generator sized by the "size" option.
void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges) {
bool quiet = op.getOptionBool("quiet");
// open input file for reading
FILE *fp = NULL;
string infile = op.getOptionString("inputFile");
if(infile != "") {
fp = fopen(infile.c_str(),"r");
if(!fp && !quiet)
{
printf("Error: Unable to read graph file %s.\n", infile.c_str());
}
}
if(!quiet) {
if(fp) {
printf("Reading graph file\n");
} else {
printf("Generating graph with problem size %d\n", (int)op.getOptionInt("size"));
}
}
// initialize number of nodes
if(fp) {
int n = fscanf(fp,"%d",&no_of_nodes);
assert(n == 1);
} else {
int problemSizes[4] = {10, 50, 200, 400};
no_of_nodes = problemSizes[op.getOptionInt("size") - 1] * 1024 * 1024;
}
// initalize the nodes & number of edges
h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
int start;
int edgeno;
for( int i = 0; i < no_of_nodes; i++)
{
if(fp) {
int n = fscanf(fp,"%d %d",&start,&edgeno);
assert(n == 2);
} else {
start = edge_list_size;
edgeno = rand() % (MAX_INIT_EDGES - MIN_EDGES + 1) + MIN_EDGES;
}
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
edge_list_size += edgeno;
}
// initialize the source node
if(fp) {
int n = fscanf(fp,"%d",&source);
assert(n == 1);
} else {
source = uniform_distribution(0, no_of_nodes - 1);
}
// NOTE(review): this unconditionally overrides the source read/drawn above,
// so BFS always starts at node 0.
source = 0;
if(fp) {
int edges;
int n = fscanf(fp,"%d",&edges);
assert(n == 1);
assert(edges == edge_list_size);
}
// initialize the edges
int id;
int cost;
h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
if(fp) {
// the edge weight is parsed but discarded — BFS here is unweighted
int n = fscanf(fp,"%d %d",&id, &cost);
assert(n == 2);
} else {
id = uniform_distribution(0, no_of_nodes - 1);
//cost = rand() % (MAX_WEIGHT - MIN_WEIGHT + 1) + MIN_WEIGHT;
}
h_graph_edges[i] = id;
}
if(!quiet) {
if(fp) {
fclose(fp);
printf("Done reading graph file\n");
} else {
printf("Done generating graph\n");
}
printf("Graph size: %d nodes, %d edges\n", no_of_nodes, edge_list_size);
}
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges)
{
bool verbose = op.getOptionBool("verbose");
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
// initalize the memory
for( int i = 0; i < no_of_nodes; i++)
{
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++) {
h_cost[i]=-1;
}
h_cost[source]=0;
// node list
Node* d_graph_nodes;
// edge list
int* d_graph_edges;
// mask
bool* d_graph_mask;
bool* d_updating_graph_mask;
// visited nodes
bool* d_graph_visited;
// result
int* d_cost;
// bool if execution is over
bool *d_over;
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes));
CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_over, sizeof(bool)));
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
cudaFree(d_over);
return FLT_MAX;
}
cudaEvent_t tstart, tstop;
cudaEventCreate(&tstart);
cudaEventCreate(&tstop);
float elapsedTime;
double transferTime = 0.;
cudaEventRecord(tstart, 0);
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
transferTime += elapsedTime * 1.e-3; // convert to seconds
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
double kernelTime = 0;
int k=0;
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
cudaEventRecord(tstart, 0);
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
// check if kernel execution generated an error
cudaEventRecord(tstart, 0);
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR()
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
if(verbose) {
printf("Kernel Executed %d times\n",k);
}
// copy result from device to host
cudaEventRecord(tstart, 0);
cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
transferTime += elapsedTime * 1.e-3; // convert to seconds
//Store the result into a file
string outfile = op.getOptionString("outputFile");
if(outfile != "") {
FILE *fpo = fopen(outfile.c_str(),"w");
for(int i=0;i<no_of_nodes;i++) {
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
}
fclose(fpo);
}
// cleanup memory
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
cudaFree(d_over);
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
resultDB.AddResult("bfs_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("bfs_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("bfs_total_time", atts, "sec", transferTime + kernelTime);
resultDB.AddResult("bfs_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime);
resultDB.AddResult("bfs_rate_edges", atts, "Edges/s", edge_list_size/kernelTime);
resultDB.AddResult("bfs_rate_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
return transferTime + kernelTime;
}
#ifdef UNIFIED_MEMORY
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA and Unified Memory
////////////////////////////////////////////////////////////////////////////////
// Runs BFS on the given graph using CUDA Unified Memory (cudaMallocManaged)
// instead of explicit device buffers. Returns the accumulated kernel time in
// seconds, or FLT_MAX if a CUDA error was observed during setup.
// Note: there is no explicit host<->device transfer phase here; the managed
// allocations are prefetched/advised toward `device` instead.
float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) {
bool verbose = op.getOptionBool("verbose");
bool quiet = op.getOptionBool("quiet");
int device = op.getOptionInt("device");
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// copy graph nodes to unified memory
Node* graph_nodes;
CUDA_SAFE_CALL(cudaMallocManaged(&graph_nodes, sizeof(Node)*no_of_nodes));
memcpy(graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes);
cudaError_t x;
// Prefetch failures are reported but not fatal: the kernels still work via
// on-demand page migration, just slower.
x = cudaMemPrefetchAsync(graph_nodes, sizeof(Node)*no_of_nodes, device);
if(x != cudaSuccess) {
printf("failed\n");
}
//cudaMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, cudaMemAdviseSetReadMostly, device);
//cudaMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device);
// copy graph edges to unified memory
int* graph_edges;
CUDA_SAFE_CALL(cudaMallocManaged(&graph_edges, sizeof(int)*edge_list_size));
memcpy(graph_edges, h_graph_edges, sizeof(int)*edge_list_size);
x = cudaMemPrefetchAsync(graph_edges, sizeof(int)*edge_list_size, device);
if(x != cudaSuccess) {
printf("failed\n");
}
// Edges are never written by the kernels, so advise read-mostly + device home.
cudaMemAdvise(graph_edges, sizeof(int)*edge_list_size, cudaMemAdviseSetReadMostly, device);
cudaMemAdvise(graph_edges, sizeof(int)*edge_list_size, cudaMemAdviseSetPreferredLocation, device);
// allocate and initalize the memory
bool* graph_mask;
bool* updating_graph_mask;
bool* graph_visited;
CUDA_SAFE_CALL(cudaMallocManaged(&graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL(cudaMallocManaged(&updating_graph_mask, sizeof(bool)*no_of_nodes));
CUDA_SAFE_CALL(cudaMallocManaged(&graph_visited, sizeof(bool)*no_of_nodes));
// NOTE(review): these prefetches move the mask arrays to the device *before*
// the host initialization loop below writes them, which will fault the pages
// back to the host — presumably intentional as a best-effort hint; verify.
x = cudaMemPrefetchAsync(graph_mask, sizeof(bool)*no_of_nodes, device);
if(x != cudaSuccess) {
printf("failed\n");
}
x = cudaMemPrefetchAsync(updating_graph_mask, sizeof(bool)*no_of_nodes, device);
if(x != cudaSuccess) {
printf("failed\n");
}
x = cudaMemPrefetchAsync(graph_visited, sizeof(bool)*no_of_nodes, device);
if(x != cudaSuccess) {
printf("failed\n");
}
cudaMemAdvise(graph_mask, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device);
cudaMemAdvise(updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device);
cudaMemAdvise(graph_visited, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device);
// NOTE(review): the error status is captured here but only checked after the
// `cost` allocation below, so a failure in that later CUDA_SAFE_CALL is not
// what `err` reflects — confirm this ordering is intended.
cudaError_t err = cudaGetLastError();
for( int i = 0; i < no_of_nodes; i++)
{
graph_mask[i]=false;
updating_graph_mask[i]=false;
graph_visited[i]=false;
}
//set the source node as true in the mask
graph_mask[source]=true;
graph_visited[source]=true;
// allocate and initialize memory for result
int* cost;
CUDA_SAFE_CALL(cudaMallocManaged(&cost, sizeof(int)*no_of_nodes));
if(err != cudaSuccess) {
cudaFree(graph_nodes);
cudaFree(graph_edges);
cudaFree(graph_mask);
cudaFree(updating_graph_mask);
cudaFree(graph_visited);
cudaFree(cost);
return FLT_MAX;
}
// cost of -1 marks "unreached"; the source starts at distance 0
for(int i=0;i<no_of_nodes;i++) {
cost[i]=-1;
}
cost[source]=0;
// bool if execution is over
bool* over;
CUDA_SAFE_CALL(cudaMallocManaged(&over, sizeof(bool)));
// events for timing
cudaEvent_t tstart, tstop;
cudaEventCreate(&tstart);
cudaEventCreate(&tstop);
float elapsedTime;
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
double kernelTime = 0;
int k=0;
bool stop;
//Call the Kernel until all the elements of Frontier are not false
// Each iteration expands the frontier one level; Kernel2 sets *over when any
// node entered the new frontier. `*over` is read/written directly on the host
// because the allocation is managed.
do
{
stop = false;
*over = stop;
cudaEventRecord(tstart, 0);
Kernel<<< grid, threads, 0 >>>(graph_nodes, graph_edges, graph_mask, updating_graph_mask, graph_visited, cost, no_of_nodes);
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
// check if kernel execution generated an error
cudaEventRecord(tstart, 0);
Kernel2<<< grid, threads, 0 >>>(graph_mask, updating_graph_mask, graph_visited, over, no_of_nodes);
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&elapsedTime, tstart, tstop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR()
stop = *over;
k++;
}
while(stop);
if(verbose && !quiet) {
printf("Kernel Time: %f\n", kernelTime);
printf("Kernel Executed %d times\n",k);
}
// cleanup memory
cudaFree(graph_nodes);
cudaFree(graph_edges);
cudaFree(graph_mask);
cudaFree(updating_graph_mask);
cudaFree(graph_visited);
cudaFree(cost);
cudaFree(over);
// Report results; rates are per second of kernel time only.
char tmp[64];
sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size);
string atts = string(tmp);
resultDB.AddResult("bfs_unifiedmem_total_time", atts, "sec", kernelTime);
resultDB.AddResult("bfs_unifiedmem_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime);
resultDB.AddResult("bfs_unifiedmem_rate_edges", atts, "Edges/s", edge_list_size/kernelTime);
return kernelTime;
}
#endif
|
8c83ca9ec0485dd9b8f4d41423a24e906aa0ef30.hip | // !!! This is a file automatically generated by hipify!!!
/* Header below added by Tulsi for replaced CUBLAS code */
#include "hip/hip_runtime.h"
#include <cusolverDn.h>
#include <rocblas.h>
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* lu.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "lu.h"
/* Array initialization. */
static
void init_array (int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
/* Fill A as a unit lower-triangular matrix: entries at or below the
diagonal get (-col % n)/n + 1, entries above are zero, and the diagonal
is forced to 1. */
for (int row = 0; row < n; row++)
{
for (int col = 0; col < n; col++)
A[row][col] = (col <= row) ? (DATA_TYPE)(-col % n) / n + 1 : 0;
A[row][row] = 1;
}
/* Make the matrix positive semi-definite (A = A * A^T) via a scratch
matrix B — not strictly needed for LU, but kept identical to the
cholesky benchmark's initialization. */
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, N, N, n, n);
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
(POLYBENCH_ARRAY(B))[row][col] = 0;
for (int kk = 0; kk < n; kk++)
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
(POLYBENCH_ARRAY(B))[row][col] += A[row][kk] * A[col][kk];
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
A[row][col] = (POLYBENCH_ARRAY(B))[row][col];
POLYBENCH_FREE_ARRAY(B);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
/* Dump A in the standard polybench format: a newline every 20 values. */
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("A");
for (int idx = 0; idx < n * n; idx++) {
if (idx % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, A[idx / n][idx % n]);
}
POLYBENCH_DUMP_END("A");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_lu(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
//#pragma scop
/* LU factorization of A via cuSOLVER getrf (HIP-translated). cuSOLVER is
column-major, so the row-major host matrix is transposed on the device
with Dgeam before factorizing and transposed back afterwards. */
double *d_A, *d_B, *d_AT;
const double cublas_alpha = 1.0;
const double cublas_beta = 0.0;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipMalloc((void **)&d_A, n * n * sizeof(double));
hipMalloc((void **)&d_AT, n * n * sizeof(double));
/* BUGFIX: d_B was previously passed to Dgeam without ever being allocated
(an uninitialized device pointer). With beta == 0 its contents are never
read, but the pointer must still be a valid device allocation. */
hipMalloc((void **)&d_B, n * n * sizeof(double));
hipMemcpy(d_A, A, n * n * sizeof(double), hipMemcpyHostToDevice);
/* d_AT = transpose(d_A): convert row-major input to column-major. */
hipblasDgeam(handle,HIPBLAS_OP_T, HIPBLAS_OP_N, n, n, &cublas_alpha, d_A, n, &cublas_beta, d_B, n, d_AT, n);
int worksize = 0;
int *devInfo;
hipMalloc((void **)&devInfo, sizeof(int));
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
/* Query and allocate the getrf workspace. */
hipsolverDnDgetrf_bufferSize(solver_handle, n, n, d_AT, n, &worksize);
double *work;
hipMalloc((void **)&work, worksize * sizeof(double));
int *devIpiv;
hipMalloc((void **)&devIpiv, n * sizeof(int));
/* In-place LU factorization with partial pivoting; pivots in devIpiv. */
hipsolverDnDgetrf(solver_handle, n, n, d_AT, n, work, devIpiv, devInfo);
int devInfo_h = 0;
hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost);
if (devInfo_h != 0)
printf("Unsuccessful getrf execution\n\n");
printf("\nFactorized matrix\n");
/* d_A = transpose(d_AT): back to row-major for the host copy-out. */
hipblasDgeam(handle,HIPBLAS_OP_T, HIPBLAS_OP_N, n, n, &cublas_alpha, d_AT, n, &cublas_beta, d_B, n, d_A, n);
hipMemcpy(A, d_A, n * n * sizeof(double), hipMemcpyDeviceToHost);
hipblasDestroy(handle);
hipFree(d_A);
hipFree(d_AT);
hipFree(d_B);
hipFree(devInfo);
hipFree(work);
hipFree(devIpiv);
hipsolverDnDestroy(solver_handle);
//#pragma endscop
}
/* Polybench driver: allocate A, initialize it, time kernel_lu (the timer
brackets the whole GPU path, including the host<->device transfers inside
kernel_lu), then dump A to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(A));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_lu (n, POLYBENCH_ARRAY(A));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
return 0;
}
| 8c83ca9ec0485dd9b8f4d41423a24e906aa0ef30.cu |
/* Header below added by Tulsi for replaced CUBLAS code */
#include "cuda_runtime.h"
#include <cusolverDn.h>
#include <cublas_v2.h>
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* lu.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "lu.h"
/* Array initialization. */
static
void init_array (int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
/* Fill A as a unit lower-triangular matrix: entries at or below the
diagonal get (-col % n)/n + 1, entries above are zero, and the diagonal
is forced to 1. */
for (int row = 0; row < n; row++)
{
for (int col = 0; col < n; col++)
A[row][col] = (col <= row) ? (DATA_TYPE)(-col % n) / n + 1 : 0;
A[row][row] = 1;
}
/* Make the matrix positive semi-definite (A = A * A^T) via a scratch
matrix B — not strictly needed for LU, but kept identical to the
cholesky benchmark's initialization. */
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, N, N, n, n);
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
(POLYBENCH_ARRAY(B))[row][col] = 0;
for (int kk = 0; kk < n; kk++)
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
(POLYBENCH_ARRAY(B))[row][col] += A[row][kk] * A[col][kk];
for (int row = 0; row < n; row++)
for (int col = 0; col < n; col++)
A[row][col] = (POLYBENCH_ARRAY(B))[row][col];
POLYBENCH_FREE_ARRAY(B);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
/* Dump A in the standard polybench format: a newline every 20 values. */
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("A");
for (int idx = 0; idx < n * n; idx++) {
if (idx % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, A[idx / n][idx % n]);
}
POLYBENCH_DUMP_END("A");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_lu(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
//#pragma scop
/* LU factorization of A via cuSOLVER getrf. cuSOLVER is column-major, so
the row-major host matrix is transposed on the device with cublasDgeam
before factorizing and transposed back afterwards. */
double *d_A, *d_B, *d_AT;
const double cublas_alpha = 1.0;
const double cublas_beta = 0.0;
cublasHandle_t handle;
cublasCreate(&handle);
cudaMalloc((void **)&d_A, n * n * sizeof(double));
cudaMalloc((void **)&d_AT, n * n * sizeof(double));
/* BUGFIX: d_B was previously passed to cublasDgeam without ever being
allocated (an uninitialized device pointer). With beta == 0 its contents
are never read, but the pointer must still be a valid device allocation. */
cudaMalloc((void **)&d_B, n * n * sizeof(double));
cudaMemcpy(d_A, A, n * n * sizeof(double), cudaMemcpyHostToDevice);
/* d_AT = transpose(d_A): convert row-major input to column-major. */
cublasDgeam(handle,CUBLAS_OP_T, CUBLAS_OP_N, n, n, &cublas_alpha, d_A, n, &cublas_beta, d_B, n, d_AT, n);
int worksize = 0;
int *devInfo;
cudaMalloc((void **)&devInfo, sizeof(int));
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
/* Query and allocate the getrf workspace. */
cusolverDnDgetrf_bufferSize(solver_handle, n, n, d_AT, n, &worksize);
double *work;
cudaMalloc((void **)&work, worksize * sizeof(double));
int *devIpiv;
cudaMalloc((void **)&devIpiv, n * sizeof(int));
/* In-place LU factorization with partial pivoting; pivots in devIpiv. */
cusolverDnDgetrf(solver_handle, n, n, d_AT, n, work, devIpiv, devInfo);
int devInfo_h = 0;
cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
if (devInfo_h != 0)
printf("Unsuccessful getrf execution\n\n");
printf("\nFactorized matrix\n");
/* d_A = transpose(d_AT): back to row-major for the host copy-out. */
cublasDgeam(handle,CUBLAS_OP_T, CUBLAS_OP_N, n, n, &cublas_alpha, d_AT, n, &cublas_beta, d_B, n, d_A, n);
cudaMemcpy(A, d_A, n * n * sizeof(double), cudaMemcpyDeviceToHost);
cublasDestroy(handle);
cudaFree(d_A);
cudaFree(d_AT);
cudaFree(d_B);
cudaFree(devInfo);
cudaFree(work);
cudaFree(devIpiv);
cusolverDnDestroy(solver_handle);
//#pragma endscop
}
/* Polybench driver: allocate A, initialize it, time kernel_lu (the timer
brackets the whole GPU path, including the host<->device transfers inside
kernel_lu), then dump A to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(A));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_lu (n, POLYBENCH_ARRAY(A));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
return 0;
}
|
6a2e9b3a41745fb26021ed2d6df32ef3fa4eb0d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************
* Module that applay the function sigmoid to all the elements of the matrix
* Author: Alonso Vidales <alonso.vidales@tras2.es>
*
* To be compiled with nvcc -ptx matrix_pow_two.cu
* Debug: nvcc -arch=sm_20 -ptx matrix_pow_two.cu
*
**************************************************/
//#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
// CUDA Kernel
// Squares every element of A in place. Each block covers a resW x resH tile;
// width is the row stride and finalSize the total element count, so the guard
// below handles tiles that extend past the matrix edge.
__global__ void matrixPowTwo(double* A, int resW, int resH, int width, int finalSize)
{
int col = threadIdx.x + blockIdx.x * resW;
int row = threadIdx.y + blockIdx.y * resH;
int idx = row * width + col;
if (idx >= finalSize || col >= width)
return;
double v = A[idx];
A[idx] = v * v;
}
#ifdef __cplusplus
}
#endif
| 6a2e9b3a41745fb26021ed2d6df32ef3fa4eb0d0.cu | /***************************************************
* Module that applay the function sigmoid to all the elements of the matrix
* Author: Alonso Vidales <alonso.vidales@tras2.es>
*
* To be compiled with nvcc -ptx matrix_pow_two.cu
* Debug: nvcc -arch=sm_20 -ptx matrix_pow_two.cu
*
**************************************************/
//#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
// CUDA Kernel
// Squares every element of A in place. Each block covers a resW x resH tile;
// width is the row stride and finalSize the total element count, so the guard
// below handles tiles that extend past the matrix edge.
__global__ void matrixPowTwo(double* A, int resW, int resH, int width, int finalSize)
{
int col = threadIdx.x + blockIdx.x * resW;
int row = threadIdx.y + blockIdx.y * resH;
int idx = row * width + col;
if (idx >= finalSize || col >= width)
return;
double v = A[idx];
A[idx] = v * v;
}
#ifdef __cplusplus
}
#endif
|
c883fb445311b89e68dd90f5fed48835fa2a7f91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%cu
#include<stdio.h>
#include<stdlib.h>
#define size 10
// Computes one element of c = a * b per thread, with (threadIdx.x,
// threadIdx.y) = (column, row). Requires a single (size x size) 2-D block.
// NOTE(review): the `n` parameter is ignored; the `size` macro fixes the
// matrix dimension instead.
__global__ void MatMul(int *a, int *b, int*c, int n)
{
int col = threadIdx.x;
int row = threadIdx.y;
int acc = 0;
for (int k = 0; k < size; k++)
acc += a[row * size + k] * b[k * size + col];
c[row * size + col] = acc;
}
// Fills two size x size matrices with random digits, multiplies them on the
// GPU with a single 2-D thread block, and prints A, B and the product.
int main()
{
int *A,*B,*C;
A = (int*)malloc(size * size * sizeof(int));
B = (int*)malloc(size * size * sizeof(int));
C = (int*)malloc(size * size * sizeof(int));
for(int i=0; i<size;i++)
{
for(int j=0; j<size; j++)
{
*(A + i*size + j) = rand()%10;
*(B + i*size + j) = rand()%10;
}
}
int *AD, *BD, *CD;
hipMalloc(&AD, size*size*sizeof(int));
hipMalloc(&BD, size*size*sizeof(int));
hipMalloc(&CD, size*size*sizeof(int));
hipMemcpy(AD, A, size*size*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(BD, B, size*size*sizeof(int), hipMemcpyHostToDevice);
// BUGFIX: the kernel indexes with threadIdx.y, so the block must be 2-D
// (size x size). The old 1-D launch of size*size threads left threadIdx.y
// at 0 for every thread, so only the first row of C was ever computed.
hipLaunchKernelGGL(( MatMul), dim3(1), dim3(size, size), 0, 0, AD, BD, CD, size);
hipMemcpy(C, CD, size*size*sizeof(int), hipMemcpyDeviceToHost);
printf("Matrix A: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(A + i*size + j));
}
printf("\n");
}
printf("\n");
printf("Matrix B: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(B + i*size + j));
}
printf("\n");
}
printf("Product: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(C + i*size + j));
}
printf("\n");
}
printf("\n");
hipFree(AD);
hipFree(BD);
hipFree(CD);
free(A);
free(B);
free(C);
return 0;
}
| c883fb445311b89e68dd90f5fed48835fa2a7f91.cu | %%cu
#include<stdio.h>
#include<stdlib.h>
#define size 10
// Computes one element of c = a * b per thread, with (threadIdx.x,
// threadIdx.y) = (column, row). Requires a single (size x size) 2-D block.
// NOTE(review): the `n` parameter is ignored; the `size` macro fixes the
// matrix dimension instead.
__global__ void MatMul(int *a, int *b, int*c, int n)
{
int col = threadIdx.x;
int row = threadIdx.y;
int acc = 0;
for (int k = 0; k < size; k++)
acc += a[row * size + k] * b[k * size + col];
c[row * size + col] = acc;
}
// Fills two size x size matrices with random digits, multiplies them on the
// GPU with a single 2-D thread block, and prints A, B and the product.
int main()
{
int *A,*B,*C;
A = (int*)malloc(size * size * sizeof(int));
B = (int*)malloc(size * size * sizeof(int));
C = (int*)malloc(size * size * sizeof(int));
for(int i=0; i<size;i++)
{
for(int j=0; j<size; j++)
{
*(A + i*size + j) = rand()%10;
*(B + i*size + j) = rand()%10;
}
}
int *AD, *BD, *CD;
cudaMalloc(&AD, size*size*sizeof(int));
cudaMalloc(&BD, size*size*sizeof(int));
cudaMalloc(&CD, size*size*sizeof(int));
cudaMemcpy(AD, A, size*size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(BD, B, size*size*sizeof(int), cudaMemcpyHostToDevice);
// BUGFIX: the kernel indexes with threadIdx.y, so the block must be 2-D
// (size x size). The old 1-D launch <<<1, size*size>>> left threadIdx.y
// at 0 for every thread, so only the first row of C was ever computed.
dim3 threadsPerBlock(size, size);
MatMul<<<1, threadsPerBlock>>>(AD, BD, CD, size);
cudaMemcpy(C, CD, size*size*sizeof(int), cudaMemcpyDeviceToHost);
printf("Matrix A: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(A + i*size + j));
}
printf("\n");
}
printf("\n");
printf("Matrix B: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(B + i*size + j));
}
printf("\n");
}
printf("Product: \n");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
printf("%d ", *(C + i*size + j));
}
printf("\n");
}
printf("\n");
cudaFree(AD);
cudaFree(BD);
cudaFree(CD);
free(A);
free(B);
free(C);
return 0;
}
|
9ef8f9a05ad58a5f945836e278eb40688f7dea19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Parallel programming addition of Two numbers
*/
#include<stdio.h>
// Adds *b into *a on the device. A __global__ kernel must return void, so the
// result is written through a device pointer instead of `return` (the original
// returned a value from a void kernel, which does not compile).
__global__ void add(int *a, int *b)
{
*a = *a + *b;
}
// Host driver: copies 2 and 3 to the device, launches the kernel with one
// thread, and prints the sum. (The original called the kernel like a host
// function with no launch configuration and printed an undeclared variable.)
int main(void)
{
int h_a = 2, h_b = 3;
int *d_a, *d_b;
hipMalloc(&d_a, sizeof(int));
hipMalloc(&d_b, sizeof(int));
hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, d_a, d_b);
hipMemcpy(&h_a, d_a, sizeof(int), hipMemcpyDeviceToHost);
printf("Addition \t%d\n", h_a);
hipFree(d_a);
hipFree(d_b);
return 0;
}
| 9ef8f9a05ad58a5f945836e278eb40688f7dea19.cu | /*
Parallel programming addition of Two numbers
*/
#include<stdio.h>
// Adds *b into *a on the device. A __global__ kernel must return void, so the
// result is written through a device pointer instead of `return` (the original
// returned a value from a void kernel, which does not compile).
__global__ void add(int *a, int *b)
{
*a = *a + *b;
}
// Host driver: copies 2 and 3 to the device, launches the kernel with one
// thread, and prints the sum. (The original called the kernel like a host
// function with no launch configuration and printed an undeclared variable.)
int main(void)
{
int h_a = 2, h_b = 3;
int *d_a, *d_b;
cudaMalloc(&d_a, sizeof(int));
cudaMalloc(&d_b, sizeof(int));
cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
add<<<1, 1>>>(d_a, d_b);
cudaMemcpy(&h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost);
printf("Addition \t%d\n", h_a);
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
105aa949835ac46699be1c8fc8e8e1ad3d47bece.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Block-queue BFS frontier expansion: each block gathers newly discovered
// vertices in a shared-memory queue (c_frontier_s) and flushes them to the
// global current-frontier queue (c_frontier) in one batched step; entries
// past BLOCK_QUEUE_SIZE spill directly to the global queue one at a time.
__global__ void BFS_Bqueue_kernel(unsigned int* p_frontier, unsigned int* p_frontier_tail, unsigned int* c_frontier, unsigned int* c_frontier_tail, unsigned int* edges, unsigned int* dest, unsigned int* label, unsigned int* visited) {
__shared__ unsigned int c_frontier_s[BLOCK_QUEUE_SIZE];
__shared__ unsigned int c_frontier_tail_s;
__shared__ unsigned int our_c_frontier_tail;
// One thread zeroes the shared queue tail before anyone can enqueue.
if (threadIdx.x == 0) {
c_frontier_tail_s = 0;
}
__syncthreads();
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// One thread per vertex of the previous frontier.
if (tid < *p_frontier_tail) {
const unsigned int my_vertex = p_frontier[tid];
// Walk this vertex's outgoing edges: edges[v]..edges[v+1] index into dest.
for (unsigned int i = edges[my_vertex]; i < edges[my_vertex + 1]; ++i) {
// atomicExch guarantees exactly one thread "wins" each destination.
const unsigned int was_visited = atomicExch(&(visited[dest[i]]), 1);
if (not was_visited) {
label[dest[i]] = label[my_vertex];
const unsigned int my_tail = atomicAdd(&c_frontier_tail_s, 1);
if (my_tail < BLOCK_QUEUE_SIZE) {
c_frontier_s[my_tail] = dest[i];
} else {
// Shared queue full: clamp the tail (all overflowing threads write
// the same value) and append straight to the global queue.
c_frontier_tail_s = BLOCK_QUEUE_SIZE;
const unsigned int my_global_tail = atomicAdd(c_frontier_tail, 1);
c_frontier[my_global_tail] = dest[i];
}
}
}
// NOTE(review): these __syncthreads() calls sit inside the
// `tid < *p_frontier_tail` branch, so every thread of a block must agree
// on that predicate — presumably guaranteed by the launch config; verify.
__syncthreads();
if (threadIdx.x == 0) {
// Reserve a contiguous slice of the global queue for this block's entries.
our_c_frontier_tail = atomicAdd(c_frontier_tail, c_frontier_tail_s);
}
__syncthreads();
// Cooperatively copy the block-local queue into the reserved global slice.
for (unsigned int i = threadIdx.x; i < c_frontier_tail_s; i+= blockDim.x) {
c_frontier[our_c_frontier_tail + i] = c_frontier_s[i];
}
}
} | 105aa949835ac46699be1c8fc8e8e1ad3d47bece.cu | #include "includes.h"
// Block-queue BFS frontier expansion: each block gathers newly discovered
// vertices in a shared-memory queue (c_frontier_s) and flushes them to the
// global current-frontier queue (c_frontier) in one batched step; entries
// past BLOCK_QUEUE_SIZE spill directly to the global queue one at a time.
__global__ void BFS_Bqueue_kernel(unsigned int* p_frontier, unsigned int* p_frontier_tail, unsigned int* c_frontier, unsigned int* c_frontier_tail, unsigned int* edges, unsigned int* dest, unsigned int* label, unsigned int* visited) {
__shared__ unsigned int c_frontier_s[BLOCK_QUEUE_SIZE];
__shared__ unsigned int c_frontier_tail_s;
__shared__ unsigned int our_c_frontier_tail;
// One thread zeroes the shared queue tail before anyone can enqueue.
if (threadIdx.x == 0) {
c_frontier_tail_s = 0;
}
__syncthreads();
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// One thread per vertex of the previous frontier.
if (tid < *p_frontier_tail) {
const unsigned int my_vertex = p_frontier[tid];
// Walk this vertex's outgoing edges: edges[v]..edges[v+1] index into dest.
for (unsigned int i = edges[my_vertex]; i < edges[my_vertex + 1]; ++i) {
// atomicExch guarantees exactly one thread "wins" each destination.
const unsigned int was_visited = atomicExch(&(visited[dest[i]]), 1);
if (not was_visited) {
label[dest[i]] = label[my_vertex];
const unsigned int my_tail = atomicAdd(&c_frontier_tail_s, 1);
if (my_tail < BLOCK_QUEUE_SIZE) {
c_frontier_s[my_tail] = dest[i];
} else {
// Shared queue full: clamp the tail (all overflowing threads write
// the same value) and append straight to the global queue.
c_frontier_tail_s = BLOCK_QUEUE_SIZE;
const unsigned int my_global_tail = atomicAdd(c_frontier_tail, 1);
c_frontier[my_global_tail] = dest[i];
}
}
}
// NOTE(review): these __syncthreads() calls sit inside the
// `tid < *p_frontier_tail` branch, so every thread of a block must agree
// on that predicate — presumably guaranteed by the launch config; verify.
__syncthreads();
if (threadIdx.x == 0) {
// Reserve a contiguous slice of the global queue for this block's entries.
our_c_frontier_tail = atomicAdd(c_frontier_tail, c_frontier_tail_s);
}
__syncthreads();
// Cooperatively copy the block-local queue into the reserved global slice.
for (unsigned int i = threadIdx.x; i < c_frontier_tail_s; i+= blockDim.x) {
c_frontier[our_c_frontier_tail + i] = c_frontier_s[i];
}
}
} |
e8ae6d5859768fdb9d7282864bce6710d9e6d79b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "cluster_cuda.cuh"
// This assumes address stores the average of n elements atomically updates
// address to store the average of n + 1 elements (the n elements as well as
// val). This might be useful for updating cluster centers.
// modified from http://stackoverflow.com/a/17401122
// Atomically replaces the running average of n values stored at *address with
// the running average of n + 1 values after folding in val. Uses the classic
// float atomicCAS retry loop: the float is reinterpreted as int, the new
// average is computed from the assumed old bits, and the CAS retries until no
// other thread raced in between. Returns the previous value at *address.
__device__
float atomicUpdateAverage(float* address, int n, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i;
int assumed;
do {
assumed = old;
// new average = (n * old_average + val) / (n + 1)
float next_val = (n * __int_as_float(assumed) + val) / (n + 1);
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(next_val));
} while (assumed != old); // lost the race: retry with the fresh value
return __int_as_float(old);
}
// computes the distance squared between vectors a and b where vectors have
// length size and stride stride.
// Returns the squared Euclidean distance between two `size`-element vectors
// whose consecutive components are `stride` elements apart in memory.
__device__
float squared_distance(float *a, float *b, int stride, int size) {
float acc = 0.0;
for (int k = 0; k < size; k++) {
float d = a[k * stride] - b[k * stride];
acc += d * d;
}
return acc;
}
/*
* Notationally, all matrices are column majors, so if I say that matrix Z is
* of size m * n, then the stride in the m axis is 1. For purposes of
* optimization (particularly coalesced accesses), you can change the format of
* any array.
*
* clusters is a REVIEW_DIM * k array containing the location of each of the k
* cluster centers.
*
* cluster_counts is a k element array containing how many data points are in
* each cluster.
*
* k is the number of clusters.
*
* data is a REVIEW_DIM * batch_size array containing the batch of reviews to
* cluster. Note that each review is contiguous (so elements 0 through 49 are
* review 0, ...)
*
* output is a batch_size array that contains the index of the cluster to which
* each review is the closest to.
*
* batch_size is the number of reviews this kernel must handle.
*/
__global__
void sloppyClusterKernel(float *clusters, int *cluster_counts, int k,
float *data, int *output, int batch_size) {
// Assigns each review to its nearest cluster and folds it into that
// cluster's running-average center.
// BUGFIXES vs. the original:
//  - the while-loop never advanced `index` (infinite loop) -> grid-stride loop;
//  - squared_distance was given float values instead of pointers, and the
//    stride/size arguments were swapped (reviews and cluster centers are
//    contiguous, so stride is 1 and size is REVIEW_DIM);
//  - cluster_counts[best] was bumped with a racy non-atomic ++, and
//    atomicUpdateAverage received a value instead of an address and the
//    post-increment count instead of the pre-increment count n.
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < batch_size;
index += blockDim.x * gridDim.x) {
float *review = data + index * REVIEW_DIM; // reviews stored contiguously
// Find the closest of the k cluster centers.
int best_cluster = 0;
float best = squared_distance(clusters, review, 1, REVIEW_DIM);
for (int i = 1; i < k; i++) {
float tmp = squared_distance(clusters + i * REVIEW_DIM, review, 1, REVIEW_DIM);
if (tmp < best) {
best_cluster = i;
best = tmp;
}
}
// Atomically claim a slot; `n` is the count *before* this review joined,
// which is what atomicUpdateAverage expects.
int n = atomicAdd(&cluster_counts[best_cluster], 1);
output[index] = best_cluster;
for (int j = 0; j < REVIEW_DIM; j++) {
atomicUpdateAverage(&clusters[REVIEW_DIM * best_cluster + j], n, review[j]);
}
}
}
// Launches sloppyClusterKernel on `stream` for a batch of reviews.
// BUGFIX: the launch configuration had grid and block swapped — the first
// launch dimension is the grid, the second the block.
void cudaCluster(float *clusters, int *cluster_counts, int k,
float *data, int *output, int batch_size,
hipStream_t stream) {
// Nothing to do (also avoids a divide-by-zero computing grid_size below).
if (batch_size <= 0) {
return;
}
int block_size = (batch_size < 1024) ? batch_size : 1024;
// grid_size = CEIL(batch_size / block_size)
int grid_size = (batch_size + block_size - 1) / block_size;
int shmem_bytes = 0;
hipLaunchKernelGGL(( sloppyClusterKernel),
dim3(grid_size),
dim3(block_size),
shmem_bytes,
stream, clusters, cluster_counts, k, data, output, batch_size);
}
| e8ae6d5859768fdb9d7282864bce6710d9e6d79b.cu | #include <cassert>
#include <cuda_runtime.h>
#include "cluster_cuda.cuh"
// This assumes address stores the average of n elements atomically updates
// address to store the average of n + 1 elements (the n elements as well as
// val). This might be useful for updating cluster centers.
// modified from http://stackoverflow.com/a/17401122
// Atomically replaces the running average of n values stored at *address with
// the running average of n + 1 values after folding in val. Uses the classic
// float atomicCAS retry loop: the float is reinterpreted as int, the new
// average is computed from the assumed old bits, and the CAS retries until no
// other thread raced in between. Returns the previous value at *address.
__device__
float atomicUpdateAverage(float* address, int n, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i;
int assumed;
do {
assumed = old;
// new average = (n * old_average + val) / (n + 1)
float next_val = (n * __int_as_float(assumed) + val) / (n + 1);
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(next_val));
} while (assumed != old); // lost the race: retry with the fresh value
return __int_as_float(old);
}
// computes the distance squared between vectors a and b where vectors have
// length size and stride stride.
// Returns the squared Euclidean distance between two `size`-element vectors
// whose consecutive components are `stride` elements apart in memory.
__device__
float squared_distance(float *a, float *b, int stride, int size) {
float acc = 0.0;
for (int k = 0; k < size; k++) {
float d = a[k * stride] - b[k * stride];
acc += d * d;
}
return acc;
}
/*
* Notationally, all matrices are column majors, so if I say that matrix Z is
* of size m * n, then the stride in the m axis is 1. For purposes of
* optimization (particularly coalesced accesses), you can change the format of
* any array.
*
* clusters is a REVIEW_DIM * k array containing the location of each of the k
* cluster centers.
*
* cluster_counts is a k element array containing how many data points are in
* each cluster.
*
* k is the number of clusters.
*
* data is a REVIEW_DIM * batch_size array containing the batch of reviews to
* cluster. Note that each review is contiguous (so elements 0 through 49 are
* review 0, ...)
*
* output is a batch_size array that contains the index of the cluster to which
* each review is the closest to.
*
* batch_size is the number of reviews this kernel must handle.
*/
__global__
void sloppyClusterKernel(float *clusters, int *cluster_counts, int k,
float *data, int *output, int batch_size) {
// Assigns each review to its nearest cluster and folds it into that
// cluster's running-average center.
// BUGFIXES vs. the original:
//  - the while-loop never advanced `index` (infinite loop) -> grid-stride loop;
//  - squared_distance was given float values instead of pointers, and the
//    stride/size arguments were swapped (reviews and cluster centers are
//    contiguous, so stride is 1 and size is REVIEW_DIM);
//  - cluster_counts[best] was bumped with a racy non-atomic ++, and
//    atomicUpdateAverage received a value instead of an address and the
//    post-increment count instead of the pre-increment count n.
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < batch_size;
index += blockDim.x * gridDim.x) {
float *review = data + index * REVIEW_DIM; // reviews stored contiguously
// Find the closest of the k cluster centers.
int best_cluster = 0;
float best = squared_distance(clusters, review, 1, REVIEW_DIM);
for (int i = 1; i < k; i++) {
float tmp = squared_distance(clusters + i * REVIEW_DIM, review, 1, REVIEW_DIM);
if (tmp < best) {
best_cluster = i;
best = tmp;
}
}
// Atomically claim a slot; `n` is the count *before* this review joined,
// which is what atomicUpdateAverage expects.
int n = atomicAdd(&cluster_counts[best_cluster], 1);
output[index] = best_cluster;
for (int j = 0; j < REVIEW_DIM; j++) {
atomicUpdateAverage(&clusters[REVIEW_DIM * best_cluster + j], n, review[j]);
}
}
}
// Launches sloppyClusterKernel on `stream` for a batch of reviews.
// BUGFIX: the launch configuration had grid and block swapped — the first
// <<<>>> argument is the grid dimension, the second the block dimension.
void cudaCluster(float *clusters, int *cluster_counts, int k,
float *data, int *output, int batch_size,
cudaStream_t stream) {
// Nothing to do (also avoids a divide-by-zero computing grid_size below).
if (batch_size <= 0) {
return;
}
int block_size = (batch_size < 1024) ? batch_size : 1024;
// grid_size = CEIL(batch_size / block_size)
int grid_size = (batch_size + block_size - 1) / block_size;
int shmem_bytes = 0;
sloppyClusterKernel<<<
grid_size,
block_size,
shmem_bytes,
stream>>>(clusters, cluster_counts, k, data, output, batch_size);
}
|
768445cc1bb414ef6f6c099fed8f8f70cd353553.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/solver.hpp"
#include "caffe/layers/loss/be_gd_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise |in_0 - in_1|: entries [0, count) of the inputs produce
// out_g, entries [count, 2*count) produce out_d.
template <typename Dtype>
static __global__ void Be_Dloss_forward_kernel(int count, const Dtype *in_0, const Dtype * in_1, Dtype *out_d, Dtype * out_g)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    out_d[idx] = abs(in_0[idx + count] - in_1[idx + count]);
    out_g[idx] = abs(in_0[idx] - in_1[idx]);
  }
}
// Element-wise |in_0 - in_1| over the first `count` entries only.
template <typename Dtype>
static __global__ void Be_Gloss_forward_kernel(int count, const Dtype *in_0, const Dtype * in_1, Dtype *out_g)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype diff = in_0[idx] - in_1[idx];
    out_g[idx] = abs(diff);
  }
}
// Sign gradient of the absolute difference, written into both inputs' diffs
// with opposite signs: the first half is scaled by k (negated when
// in_0 > in_1), the second half by 1.
template <typename Dtype>
static __global__ void Be_Dloss_backward_kernel(int count, Dtype k, const Dtype *data_in_0, const Dtype * data_in_1, Dtype *diff_in_0, Dtype *diff_in_1)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype s0 = (data_in_0[idx] > data_in_1[idx]) ? -k : k;
    diff_in_0[idx] = s0;
    diff_in_1[idx] = -s0;
    const Dtype s1 =
        (data_in_0[idx + count] > data_in_1[idx + count]) ? Dtype(1) : Dtype(-1);
    diff_in_0[idx + count] = s1;
    diff_in_1[idx + count] = -s1;
  }
}
// Sign gradient on the first half; the second half of both diffs is zeroed.
template <typename Dtype>
static __global__ void Be_Gloss_backward_kernel(int count, const Dtype *data_in_0, const Dtype *data_in_1, Dtype *diff_in_0, Dtype *diff_in_1)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype s = (data_in_0[idx] > data_in_1[idx]) ? Dtype(1) : Dtype(-1);
    diff_in_0[idx] = s;
    diff_in_1[idx] = -s;
    diff_in_0[idx + count] = Dtype(0);
    diff_in_1[idx + count] = Dtype(0);
  }
}
// Forward pass of the loss (GPU, HIP build).
// bottom[0]/bottom[1]: two same-shaped blobs; the loss is built from their
// element-wise absolute differences. Each kernel processes
// num/2*channels*height*width elements; the D kernel also reads the second
// half of the batch via an i+count offset.
// top[0]: scalar loss. sum_d/sum_g and loss_d_/loss_g_ are members declared
// in the layer's header (not visible in this file).
template <typename Dtype>
void BeGdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
Dtype gamma = 0.5; // NOTE(review): only used by the commented-out code below
if (Caffe::gan_type() == "train_dnet")
{
// Discriminator-training phase: fill both per-element loss blobs.
hipLaunchKernelGGL(( Be_Dloss_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*channels*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),loss_d_.mutable_gpu_data(),loss_g_.mutable_gpu_data());
// Reduce each loss blob to a scalar (sum of absolute values).
caffe_gpu_asum(loss_d_.count(),loss_d_.gpu_data(),&sum_d);
caffe_gpu_asum(loss_g_.count(),loss_g_.gpu_data(),&sum_g);
//top[0]->mutable_cpu_data()[0] = (sum_d - k_ * sum_g) / (num/2*channels*height*width);
//top[0]->mutable_cpu_data()[0] = (sum_d + abs(gamma*sum_d - sum_g)) / (num/2*channels*height*width);
top[0]->mutable_cpu_data()[0] = sum_d / (num/2*channels*height*width);
}
else
{
// Generator-training phase: only the G term is computed.
hipLaunchKernelGGL(( Be_Gloss_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*channels*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),loss_g_.mutable_gpu_data());
caffe_gpu_asum(loss_g_.count(),loss_g_.gpu_data(),&sum_g);
top[0]->mutable_cpu_data()[0] = sum_g / (num/2*channels*height*width);
// The balancing coefficient (passed as `k` to the backward kernel) is pinned
// to a constant; the adaptive update is commented out below.
this->blobs_[0]->mutable_cpu_data()[0] = 0.08;
//this->blobs_[0]->mutable_cpu_data()[0] = this->blobs_[0]->cpu_data()[0] + 0.001*(gamma*sum_d - sum_g) / Dtype(num/2*channels*height*width);
//LOG(INFO)<<"delta = "<<(gamma*sum_d - sum_g) / Dtype(num/2*channels*height*width);
//if (this->blobs_[0]->cpu_data()[0] < 0)
// this->blobs_[0]->mutable_cpu_data()[0] = 0;
//if (this->blobs_[0]->cpu_data()[0] > 1)
// this->blobs_[0]->mutable_cpu_data()[0] = 1.0;
//if (Solver<Dtype>::iter() % 100 == 0)
// LOG(INFO)<<this->blobs_[0]->cpu_data()[0];
}
}
// Backward pass (GPU, HIP build): writes sign gradients of the
// absolute-difference loss into both bottoms' diffs, then scales them by the
// incoming top diff divided by the half-batch element count.
// NOTE(review): the second-pass branch clears only bottom[0]'s diff;
// bottom[1]'s diff is left untouched — confirm this is intentional.
template <typename Dtype>
void BeGdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
if (Caffe::second_pass() == false)
{
// Normalization: top gradient / number of elements per half batch.
Dtype loss_weights_ = top[0]->cpu_diff()[0] / (num/2*channels*height*width);
if (Caffe::gan_type() == "train_dnet")
{
// blobs_[0][0] is the k scale set during Forward_gpu.
hipLaunchKernelGGL(( Be_Dloss_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*channels*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*channels*height*width,this->blobs_[0]->cpu_data()[0],bottom[0]->gpu_data(),bottom[1]->gpu_data(),bottom[0]->mutable_gpu_diff(),bottom[1]->mutable_gpu_diff());
}
else
{
hipLaunchKernelGGL(( Be_Gloss_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*channels*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),bottom[0]->mutable_gpu_diff(),bottom[1]->mutable_gpu_diff());
}
caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff());
caffe_gpu_scal(bottom[1]->count(),loss_weights_,bottom[1]->mutable_gpu_diff());
}
else
{
caffe_gpu_set(bottom[0]->count(),Dtype(0),bottom[0]->mutable_gpu_diff());
}
}
// Second-order forward pass: intentionally a no-op for this layer
// (only debug instrumentation remains, commented out).
template <typename Dtype>
void BeGdLossLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
//Dtype sum;
//caffe_gpu_asum(bottom[0]->count(),bottom[0]->gpu_sec_diff(),&sum);
//LOG(INFO)<<sum;
}
INSTANTIATE_LAYER_GPU_FUNCS(BeGdLossLayer);
} // namespace caffe
| 768445cc1bb414ef6f6c099fed8f8f70cd353553.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/solver.hpp"
#include "caffe/layers/loss/be_gd_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise |in_0 - in_1|: entries [0, count) of the inputs produce
// out_g, entries [count, 2*count) produce out_d.
template <typename Dtype>
static __global__ void Be_Dloss_forward_kernel(int count, const Dtype *in_0, const Dtype * in_1, Dtype *out_d, Dtype * out_g)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    out_d[idx] = abs(in_0[idx + count] - in_1[idx + count]);
    out_g[idx] = abs(in_0[idx] - in_1[idx]);
  }
}
// Element-wise |in_0 - in_1| over the first `count` entries only.
template <typename Dtype>
static __global__ void Be_Gloss_forward_kernel(int count, const Dtype *in_0, const Dtype * in_1, Dtype *out_g)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype diff = in_0[idx] - in_1[idx];
    out_g[idx] = abs(diff);
  }
}
// Sign gradient of the absolute difference, written into both inputs' diffs
// with opposite signs: the first half is scaled by k (negated when
// in_0 > in_1), the second half by 1.
template <typename Dtype>
static __global__ void Be_Dloss_backward_kernel(int count, Dtype k, const Dtype *data_in_0, const Dtype * data_in_1, Dtype *diff_in_0, Dtype *diff_in_1)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype s0 = (data_in_0[idx] > data_in_1[idx]) ? -k : k;
    diff_in_0[idx] = s0;
    diff_in_1[idx] = -s0;
    const Dtype s1 =
        (data_in_0[idx + count] > data_in_1[idx + count]) ? Dtype(1) : Dtype(-1);
    diff_in_0[idx + count] = s1;
    diff_in_1[idx + count] = -s1;
  }
}
// Sign gradient on the first half; the second half of both diffs is zeroed.
template <typename Dtype>
static __global__ void Be_Gloss_backward_kernel(int count, const Dtype *data_in_0, const Dtype *data_in_1, Dtype *diff_in_0, Dtype *diff_in_1)
{
  CUDA_KERNEL_LOOP(idx, count)
  {
    const Dtype s = (data_in_0[idx] > data_in_1[idx]) ? Dtype(1) : Dtype(-1);
    diff_in_0[idx] = s;
    diff_in_1[idx] = -s;
    diff_in_0[idx + count] = Dtype(0);
    diff_in_1[idx + count] = Dtype(0);
  }
}
// Forward pass of the loss (GPU, CUDA build).
// bottom[0]/bottom[1]: two same-shaped blobs; the loss is built from their
// element-wise absolute differences. Each kernel processes
// num/2*channels*height*width elements; the D kernel also reads the second
// half of the batch via an i+count offset.
// top[0]: scalar loss. sum_d/sum_g and loss_d_/loss_g_ are members declared
// in the layer's header (not visible in this file).
template <typename Dtype>
void BeGdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
Dtype gamma = 0.5; // NOTE(review): only used by the commented-out code below
if (Caffe::gan_type() == "train_dnet")
{
// Discriminator-training phase: fill both per-element loss blobs.
Be_Dloss_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*channels*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),loss_d_.mutable_gpu_data(),loss_g_.mutable_gpu_data());
// Reduce each loss blob to a scalar (sum of absolute values).
caffe_gpu_asum(loss_d_.count(),loss_d_.gpu_data(),&sum_d);
caffe_gpu_asum(loss_g_.count(),loss_g_.gpu_data(),&sum_g);
//top[0]->mutable_cpu_data()[0] = (sum_d - k_ * sum_g) / (num/2*channels*height*width);
//top[0]->mutable_cpu_data()[0] = (sum_d + abs(gamma*sum_d - sum_g)) / (num/2*channels*height*width);
top[0]->mutable_cpu_data()[0] = sum_d / (num/2*channels*height*width);
}
else
{
// Generator-training phase: only the G term is computed.
Be_Gloss_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*channels*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),loss_g_.mutable_gpu_data());
caffe_gpu_asum(loss_g_.count(),loss_g_.gpu_data(),&sum_g);
top[0]->mutable_cpu_data()[0] = sum_g / (num/2*channels*height*width);
// The balancing coefficient (passed as `k` to the backward kernel) is pinned
// to a constant; the adaptive update is commented out below.
this->blobs_[0]->mutable_cpu_data()[0] = 0.08;
//this->blobs_[0]->mutable_cpu_data()[0] = this->blobs_[0]->cpu_data()[0] + 0.001*(gamma*sum_d - sum_g) / Dtype(num/2*channels*height*width);
//LOG(INFO)<<"delta = "<<(gamma*sum_d - sum_g) / Dtype(num/2*channels*height*width);
//if (this->blobs_[0]->cpu_data()[0] < 0)
// this->blobs_[0]->mutable_cpu_data()[0] = 0;
//if (this->blobs_[0]->cpu_data()[0] > 1)
// this->blobs_[0]->mutable_cpu_data()[0] = 1.0;
//if (Solver<Dtype>::iter() % 100 == 0)
// LOG(INFO)<<this->blobs_[0]->cpu_data()[0];
}
}
// Backward pass (GPU, CUDA build): writes sign gradients of the
// absolute-difference loss into both bottoms' diffs, then scales them by the
// incoming top diff divided by the half-batch element count.
// NOTE(review): the second-pass branch clears only bottom[0]'s diff;
// bottom[1]'s diff is left untouched — confirm this is intentional.
template <typename Dtype>
void BeGdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
if (Caffe::second_pass() == false)
{
// Normalization: top gradient / number of elements per half batch.
Dtype loss_weights_ = top[0]->cpu_diff()[0] / (num/2*channels*height*width);
if (Caffe::gan_type() == "train_dnet")
{
// blobs_[0][0] is the k scale set during Forward_gpu.
Be_Dloss_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*channels*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*channels*height*width,this->blobs_[0]->cpu_data()[0],bottom[0]->gpu_data(),bottom[1]->gpu_data(),bottom[0]->mutable_gpu_diff(),bottom[1]->mutable_gpu_diff());
}
else
{
Be_Gloss_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*channels*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*channels*height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),bottom[0]->mutable_gpu_diff(),bottom[1]->mutable_gpu_diff());
}
caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff());
caffe_gpu_scal(bottom[1]->count(),loss_weights_,bottom[1]->mutable_gpu_diff());
}
else
{
caffe_gpu_set(bottom[0]->count(),Dtype(0),bottom[0]->mutable_gpu_diff());
}
}
// Second-order forward pass: intentionally a no-op for this layer
// (only debug instrumentation remains, commented out).
template <typename Dtype>
void BeGdLossLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
//Dtype sum;
//caffe_gpu_asum(bottom[0]->count(),bottom[0]->gpu_sec_diff(),&sum);
//LOG(INFO)<<sum;
}
INSTANTIATE_LAYER_GPU_FUNCS(BeGdLossLayer);
} // namespace caffe
|
ca3e349d39fa4810cbadf76e206ac1203716f467.hip | // !!! This is a file automatically generated by hipify!!!
//////////////CUDA INCLUDES///////////////
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
//////////////////////////////////////////
#include "stdafx.h"
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <string>
#include <windows.h>
#include <limits>
#include <math.h>
#include <vector>
#include <list>
#include <time.h>
#include <algorithm>
#include <assert.h>
#undef max
#undef min
#define M_PI 3.14159265358979323846 /* pi */
#define maxOf(a, b) (((a) > (b)) ? a : b)
using namespace std;
struct Neuron;
typedef Neuron *ptNeuron;
// A weighted, directed connection from one neuron to another.
struct arc {
ptNeuron target = nullptr; // destination neuron
float weight = 0; // connection weight
float oldDelta = 0; // previous weight change (presumably for momentum — confirm)
//bool enabled = true;
};
// Connection endpoints by pointer (base -> target), without a weight.
struct interArc {
ptNeuron target = nullptr;
ptNeuron base = nullptr;
};
struct Neuron {
vector<arc> OutArcs; // to initialize: = new vector<arc>(10);
u_int numOutArcs = 0; // number of outgoing arcs
u_int numInArcs = 0; // number of incoming arcs
u_int layer = 0; // row (layer) index of the neuron
u_int column = 0; // column index of the neuron
float bayes = 0.01f; // bias weight
float oldBayesDelta = 0; // last change applied to the bias weight
//vector<float> timeBayes; // vector of temporal interconnections
vector<float> influenceInput; // per-input relative influence percentages
vector<float> influenceOutput; // relative influence of the error backpropagated from each output
float output = 0; // current potential of the neuron
float absOutSum = 0; // sum of the absolute values of the neuron's inputs
float absDeltaSum = 0; // sum of the absolute values of the weight changes
float BPerr = 0; // backpropagation error
int neurIdx = 0; // unique index identifying the neuron's position
};
// One layer of the network.
struct Layer {
vector<Neuron> Neurons;
u_int numNeurons = 0;
};
// Output remapping range for a network output.
struct Omap {
float maxValue = 1;
float minValue = 0;
};
struct conMap { // structure needed when inserting a new neuron (saves/restores connections)
u_int startLyr;
u_int startCol;
u_int arcRef;
u_int targetLyr;
u_int targetCol;
};
// A single time series (one row of a CSV file).
struct timeSeries {
list<float> evento;
};
// One training/validation example: inputs and desired outputs.
struct example {
vector<float> input;
vector<float> Doutput;
};
// A dataset split into training and validation sets, with their errors.
struct Dataset {
vector<example> trainingSet;
vector<example> validationSet;
float triningErr = 0;
float validationErr = 0;
};
// Loads CSV time series and turns them into sliding-window datasets
// (input = `inEx` previous steps, output = the next `outStep` values).
class DatasetCore {
public:
list<Dataset> Datasets;
DatasetCore() {
}
////////////////////////////////////////////////DATASET MANIPULATION//////////////////////////////////////
// Reads `filename`.csv and builds a Dataset by sliding a window over the series.
// outStep - number of values per output example
// inEx - number of previous output examples passed in each input example
// trainingPerc - percentage of the dataset to use as the training set
void readTimeSeriesCsv(string filename, int outStep, int inEx, float trainingPerc) {
//outStep - rappresenta il numero di valori per esempio output
//inEx - il numero di esempi output precedenti che vengono passati in ogni esempio input
//trainingPerc - la percentuale di dataset da usare come trainingset
cout << "caricamento del file " << filename << endl;
ifstream file(filename + ".csv");
stringstream stream;
stream << file.rdbuf();
string line, cell;
int col = 0, row = -1;
int fileRow = coutnRow(filename);
list<timeSeries> esempi(fileRow);
list<timeSeries>::iterator it = esempi.begin();
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
col = 0; row++;
it->evento.resize(0);
while (getline(streamline, cell, ';')) {
//cout << stof(cell) << endl;
it->evento.push_back(stof(cell));
}
it++;
}
Dataset newSet;
example newEx;
newEx.Doutput.resize(outStep);
newEx.input.resize(inEx*outStep);
int nEx = esempi.size() - inEx;
int ntrainEx = (int)((trainingPerc / 100) * nEx);
int nvalidateEx = nEx - ntrainEx;
int sPos = 0, ePos = inEx, pos = 0; // the step is outStep
int inId = 0, outId = 0;
// Slide the [sPos, ePos] window over every series, one example per shift.
for (int i = 0; i < nEx; i++) {
pos = 0, inId = 0, outId = 0;
for (list<timeSeries>::iterator p = esempi.begin(); p != esempi.end(); p++) {
for (list<float>::iterator q = p->evento.begin(); q != p->evento.end(); q++) {
if (pos >= sPos && pos <= ePos) {
if (pos >= inEx + sPos) { // load the output vector
newEx.Doutput[outId] = *q;
outId++;
}
else { // load the input vector
newEx.input[inId] = *q;
inId++;
}
}
}
pos++;
}
if (sPos < ntrainEx) { // load the training set
newSet.trainingSet.push_back(newEx);
}
else { // load the validation set
newSet.validationSet.push_back(newEx);
}
//cout << sPos << endl;
sPos++;
ePos++;
}
Datasets.push_back(newSet);
}
// Returns the n-th dataset's training set (training == true) or validation set.
vector<example> getDataset(int n, bool training = true) {
list<Dataset>::iterator p = Datasets.begin();
for (int k = 0; k < n; k++) p++;
if (training == true) {
return p->trainingSet;
}
else {
return p->validationSet;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////OTHER FUNCTIONS//////////////////////////////////////////////
// Counts the newline-delimited rows of `filename`.csv.
// (Name keeps the historical "coutnRow" typo for caller compatibility.)
int coutnRow(string filename) {
ifstream file(filename + ".csv");
stringstream stream;
stream << file.rdbuf();
string line;
int row = 0;
while (getline(stream, line, '\n'))row++;
return row;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class Network {
public:
vector<Layer> Layers; // vector of Layer structs
vector<example> examples; // vector of training examples
vector<Omap> map; // remapping values for the network's outputs
string genoma = ""; // file name (network "genome")
u_int nLayers = 0; // layers in the network, input and output included
u_int nNeurons = 0; // total number of neurons in the network
u_int nArc = 0; // total number of arcs present in the network
// Binds the network to its on-disk description; call getNetParams() to load it.
Network(string filename) {
genoma = filename;
}
////////////////////////////////////FUNZIONE COSTRUZIONE RETE DA FILE/////////////////////////////////////////////////////////
// Builds the network from `genoma`.txt. Expected file sections, in order:
//   NL <n>            - number of layers
//   L<i> <n>          - neurons per layer
//   NCON              - per-neuron "<lyr>-<col>_<nOutArcs>:<bias>" lines
//   CON               - per-neuron connection lists "<tLyr>-<tCol> <weight>"
// A small state machine (flag) tracks which section is being parsed.
void getNetParams() {
ifstream file(genoma + ".txt");
string line, segment, pice;
int flag = 4; // 3- NL, 2-Ln, 1-NCON, 0-CON
int pos;
int lyr, neuronsInLyr, Nneuron, con; // layer index, neurons in a given layer, outgoing connections of a given neuron
int Tlyr, Tneuron; // target layer, target neuron
int conFlag; // 0 - insert the connection, 1 - insert the weight
float bayes;
//file.open(filename + ".txt");
stringstream stream;
stream << file.rdbuf();
if (file.is_open()) {
//cout << "si e' apertpo" << endl;
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
pos = 0;
while (getline(streamline, segment, ' ')) { // split the row into segments
//cout << segment << endl;
if (flag != 0 && pos == 0) {
if (segment == "CON") {
flag = 0;
}
else if (segment == "NCON") {
flag = 1;
}
else if (segment == "L0") {
flag = 2;
}
else if (segment == "NL") {
flag = 3;
}
}
if (flag == 0 && segment != "CON") { // connection declarations
//cout << "flag0" << endl;
if (pos == 0) {
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax CON \n 0-0
lyr = stoi(pice); // stays fixed for the whole row
getline(streamsegment, pice);
Nneuron = stoi(pice);
conFlag = 0;
}
else if (lyr != nLayers) {
if (conFlag == 0) { // initialize the connection
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax CON \n 1-0
Tlyr = stoi(pice);
getline(streamsegment, pice, ' ');
Tneuron = stoi(pice);
Layers[lyr].Neurons[Nneuron].OutArcs[(pos - 1) / 2].target = &(Layers[Tlyr].Neurons[Tneuron]);
Layers[Tlyr].Neurons[Tneuron].numInArcs++;
conFlag = 1;
}
else { // initialize the connection weight
Layers[lyr].Neurons[Nneuron].OutArcs[(pos - 2) / 2].weight = stof(segment);
conFlag = 0;
}
}
}
else if (flag == 1 && segment != "NCON") { // number-of-connections declarations
//cout << "flag1" << endl;
if (pos == 0) {
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax NCON \n 0-0_4
lyr = stoi(pice);
getline(streamsegment, pice, '_');
Nneuron = stoi(pice);
getline(streamsegment, pice, ':');
con = stoi(pice);
getline(streamsegment, pice, ' ');
bayes = stof(pice);
if (con >= 0) {
Layers[lyr].Neurons[Nneuron].numOutArcs = con;
nArc += con;
Layers[lyr].Neurons[Nneuron].OutArcs.resize(con, arc()); //= new vector<arc>(con); // declare the neuron's connection count
Layers[lyr].Neurons[Nneuron].layer = lyr;
Layers[lyr].Neurons[Nneuron].column = Nneuron;
Layers[lyr].Neurons[Nneuron].bayes = bayes;
}
}
}
else if (flag == 2) { // neurons per layer
//cout << "flag2" << endl;
if (pos == 0) {
lyr = stoi(segment.erase(0, 1));
}
else {
neuronsInLyr = stoi(segment);
Layers[lyr].numNeurons = neuronsInLyr;
nNeurons += neuronsInLyr;
Layers[lyr].Neurons.resize(neuronsInLyr, Neuron()); // = new vector<Neuron>(neuronsInLyr); // declare the number of neurons per layer
//cout << segment << " " << lyr << endl;
}
}
else if (flag == 3 && pos != 0) { // number of layers
//cout << "flag3" << endl;
Layers.resize(stoi(segment), Layer()); //= new vector<Layer>(stoi(segment)); // declare the number of layers
nLayers = stoi(segment);
//cout << segment << endl;
}
pos++;
}
}
// Output-layer neurons have no outgoing arcs, so set their coordinates here.
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
Layers[nLayers - 1].Neurons[i].column = i;
Layers[nLayers - 1].Neurons[i].layer = nLayers - 1;
}
}
else {
cout << genoma + ": errore nell'apertura o file non trovato" << endl;
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////FUNZIONE COSTRUZIONE DATASET DA FILE//////////////////////////////////////////////////////
// Loads `filename`.txt into `examples`. Expected sections: NE (example
// count), IN (input size), OUT (output size), ES (then alternating input /
// output rows). Validates IN/OUT against the network's first/last layer.
// NOTE: the `goto examplesLoading` jumps out of the header-parsing loops
// into the example-loading phase once "ES" is seen.
void getDataset(string filename) {
ifstream file(filename + ".txt");
string line, segment, pice;
int flag = 4; // 3- NE, 2-IN, 1-OUT, 0-ES
int pos, esPos;
int input, output, es; // input size, output size, example count
int IOFlag = 0; // 0 - example input row, 1 - example output row
//file.open(filename + ".txt");
stringstream stream;
stream << file.rdbuf();
if (file.is_open()) {
//cout << "si e' apertpo" << endl;
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
pos = 0;
while (getline(streamline, segment, ' ')) { // split the row into segments
//cout << segment << endl;
if (flag != 0 && pos == 0) {
if (segment == "ES") {
flag = 0;
}
else if (segment == "OUT") {
flag = 1;
}
else if (segment == "IN") {
flag = 2;
}
else if (segment == "NE") {
flag = 3;
}
}
if (flag == 0 && segment == "ES") { // start loading the examples
//cout << "flag0" << endl;
goto examplesLoading;
}
else if (flag == 1 && pos != 0) { // number of outputs
//cout << "flag1" << endl;
output = stoi(segment);
if (output != numNeurons(nLayers - 1)) { cout << "Il dataset deve avere lo stesso numero di output della rete!!" << endl; ClearDataset(); return; }
}
else if (flag == 2 && pos != 0) { // number of inputs
//cout << "flag2" << endl;
input = stoi(segment);
if (input != numNeurons(0)) { cout << "Il dataset deve avere lo stesso numero di input della rete!!" << endl; ClearDataset(); return; }
}
else if (flag == 3 && pos != 0) { // number of examples
//cout << "flag3" << endl;
examples.resize(stoi(segment), example()); //= new vector<Layer>(stoi(segment)); // declare the number of examples
//cout << segment << endl;
}
pos++;
}
}
examplesLoading:
esPos = 0;
// Remaining rows alternate: input row, then output row, per example.
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
pos = 0;
if (!IOFlag) { // load the inputs
examples[esPos].input.resize(input);
while (getline(streamline, segment, ' ')) { // split the row into segments
examples[esPos].input[pos] = stof(segment);
pos++;
}
IOFlag = 1;
}
else { // load the outputs
examples[esPos].Doutput.resize(output);
while (getline(streamline, segment, ' ')) { // split the row into segments
examples[esPos].Doutput[pos] = stof(segment);
pos++;
}
IOFlag = 0;
esPos++;
}
}
}
else {
cout << genoma + ": errore nell'apertura o file non trovato" << endl;
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////SALVA RETE SU FILE///////////////////////////////////////////////////////////////
// Serializes the network to `<filename>.txt` (or `<genoma>.txt` when
// filename is empty) in the NL / L<i> / NCON / CON format that
// getNetParams() reads back.
void saveNet(string filename = "") {
cout << "saving net.. " << endl;
ofstream file;
if (filename != "") {
file.open(filename + ".txt");
}
else {
file.open(genoma + ".txt");
}
// Header: layer count and neurons per layer.
file << "NL " << nLayers << '\n';
for (int i = 0; i < nLayers; i++) file << "L" << i << " " << Layers[i].numNeurons << '\n';
// Per-neuron out-arc count and bias.
file << "NCON" << '\n';
for (int b = 0; b < nLayers; b++) {
for (int d = 0; d < Layers[b].numNeurons; d++) {
file << b << "-" << d << "_" << Layers[b].Neurons[d].numOutArcs << ":" << Layers[b].Neurons[d].bayes << '\n';
}
}
// Per-neuron connection targets and weights.
file << "CON";
for (int b = 0; b < nLayers; b++) {
//cout << "salvataggio connessioni layer:" << b << endl;
for (int d = 0; d < Layers[b].numNeurons; d++) {
file << '\n';
file << b << "-" << d;
for (int c = 0; c < Layers[b].Neurons[d].numOutArcs; c++) {
file << " ";
file << Layers[b].Neurons[d].OutArcs[c].target->layer << "-" << Layers[b].Neurons[d].OutArcs[c].target->column << " " << Layers[b].Neurons[d].OutArcs[c].weight;
}
}
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////SALVA DATASET SU FILE///////////////////////////////////////////////////////////////
// Serializes `examples` to `<filename>.txt` in the NE / IN / OUT / ES format
// that getDataset() reads back. (The ofstream destructor closes the file.)
void saveDataset(string filename) {
ofstream file(filename + ".txt");
file << "NE " << examples.size() << '\n';
file << "IN " << Layers[0].numNeurons << '\n';
file << "OUT " << Layers[nLayers - 1].numNeurons << '\n';
file << "ES" << '\n';
for (int i = 0; i < examples.size(); i++) {
for (int j = 0; j < examples[i].input.size(); j++) { // save the inputs
file << examples[i].input[j];
if (j == examples[i].input.size() - 1) { file << '\n'; }
else { file << " "; }
}
for (int j = 0; j < examples[i].Doutput.size(); j++) { // save the outputs
file << examples[i].Doutput[j];
if (j == examples[i].Doutput.size() - 1) { file << '\n'; }
else { file << " "; }
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////// FUNZIONI MODIFICA RETE//////////////////////////////////////////////////////////
// Removes the arc (Nlayer,Ncolumn) -> (targetLayer,targetColumn), fixing the
// target's in-arc count and influence vectors. If the target is left with no
// incoming arcs (and is not an input neuron) the whole target neuron is
// deleted instead. Prints an error if the arc is not found.
void deleteArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer == targetLayer && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column == targetColumn) {
// update the network parameters related to this arc
Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs--;
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs == 0 && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer != 0) {
deleteNeuron(Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer, Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + i);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
return;
}
}
cout << "errore: arco (" << Nlayer << "-" << Ncolumn << ") -> (" << targetLayer << "-" << targetColumn << ") non trovato, eliinazione fallita" << endl;
}
// Removes an arc given the base neuron's indices and a pointer to the arc
// itself. Like deleteArc(), deletes the target neuron outright when it loses
// its last incoming arc (no layer-0 guard here, unlike deleteArc — confirm).
void deleteArcByRef(int Nlayer, int Ncolumn, arc *targetArc) { // delete an arc given the base indices and a pointer to the connection
// update the network parameters related to this arc
targetArc->target->numInArcs--;
if (targetArc->target->influenceInput.size() > 0)targetArc->target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (targetArc->target->numInArcs == 0) {
deleteNeuron(targetArc->target->layer, targetArc->target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + getOutConTargetID(getTarget(Nlayer, Ncolumn), targetArc->target));
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
}
// Deletes neuron (Nlayer,Ncolumn): removes all arcs pointing at it, removes
// its own outgoing arcs, shifts the column index of following neurons in the
// layer, and erases it from the layer.
// NOTE(review): deleteArcByRef() erases elements from the very OutArcs
// vectors that the k/i loops are indexing, without adjusting the loop
// counter — after an erase the next element shifts into position k and is
// skipped. Recursive deleteNeuron() calls can compound this. Confirm and fix
// the iteration if dangling arcs are observed.
void deleteNeuron(int Nlayer, int Ncolumn) {
// remove the arc from the neurons pointing at the node being deleted and
// decrement the out-arc count of the neurons that target it
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == Nlayer && Layers[i].Neurons[j].OutArcs[k].target->column == Ncolumn) {
deleteArcByRef(i, j, &Layers[i].Neurons[j].OutArcs[k]);
}
}
}
}
// remove the outgoing arcs of the neuron being deleted
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
deleteArcByRef(Nlayer, Ncolumn, &Layers[Nlayer].Neurons[Ncolumn].OutArcs[i]);
}
// shift down the column index of the following neurons in the same layer
for (int i = Ncolumn + 1; i < Layers[Nlayer].numNeurons; i++) Layers[Nlayer].Neurons[i].column--;
// remove the neuron
Layers[Nlayer].Neurons.erase(Layers[Nlayer].Neurons.begin() + Ncolumn);
Layers[Nlayer].numNeurons--;
nNeurons--;
}
void addArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
arc newArc; newArc.target = &(Layers[targetLayer].Neurons[targetColumn]); newArc.weight = 0.01f;
Layers[targetLayer].Neurons[targetColumn].numInArcs++;
Layers[Nlayer].Neurons[Ncolumn].OutArcs.push_back(newArc);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs++;
}
// Inserts a new neuron into layer Nlayer. inConFill/outConFill in [0,1] give
// the fraction of the neurons in the preceding/following layers to connect
// to, chosen via random permutations (casualVector). All new weights start
// at 0.01.
void addNeuron(int Nlayer, float inConFill, float outConFill) {
//init new neuron
Neuron newNeur;
newNeur.layer = Nlayer;
newNeur.column = Layers[Nlayer].numNeurons; // column start at 0 , numNeurons start at 1
int nBackNeurons = 0; for (int i = 0; i < Nlayer; i++) nBackNeurons += Layers[i].numNeurons;
int nFrontNeurons = 0; for (int i = Nlayer + 1; i < nLayers; i++) nFrontNeurons += Layers[i].numNeurons;
newNeur.numOutArcs = (int)(outConFill*nFrontNeurons);
newNeur.numInArcs = (int)(inConFill*nBackNeurons);
newNeur.OutArcs.resize(newNeur.numOutArcs);
//bind neuron with neuron of front layers (OUTPUT CONNECTIONS)
int x, y = 0;
vector<u_int> OutArcAcces = casualVector(newNeur.numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
while (y < OutArcAcces.size()) {
x = 0;
for (int i = Nlayer + 1; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
if (y > OutArcAcces.size() - 1) continue;
if (NetAcces[y] == x) {
newNeur.OutArcs[OutArcAcces[y]].target = &(Layers[i].Neurons[j]);
newNeur.OutArcs[OutArcAcces[y]].weight = 0.01f;
y++;
}
x++;
}
}
}
cout << "connessioni output create" << endl << endl;
WeightsStamp("w");
// insert the neuron into the network
// NOTE: push_back() can reallocate the vector, changing the addresses of its
// elements; every connection pointing into the updated layer would be lost,
// hence the save/restore below.
vector<conMap> connections = saveConsTowardsLyr(Nlayer); // save the connections into this layer
Layers[Nlayer].Neurons.push_back(newNeur); // insert the new neuron (the vector's references change)
Layers[Nlayer].numNeurons++;
loadConsTowardsLyr(connections); // refresh the pointers into this vector's neurons
WeightsStamp("w");
cout << "neurone inserito" << endl << endl;
// bind neuron with back layers (INPUT CONNECTIONS)
NetAcces = casualVector(nBackNeurons);
arc newArc; newArc.weight = 0.01f;
y = 0;
while (y < newNeur.numInArcs) {
x = 0;
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
if (y > newNeur.numInArcs - 1) continue;
if (NetAcces[y] == x) {
newArc.target = &(Layers[Nlayer].Neurons[Layers[Nlayer].numNeurons - 1]);
//Layers[Nlayer].Neurons[Layers[Nlayer].numNeurons - 1].numInArcs++;
Layers[i].Neurons[j].OutArcs.push_back(newArc);
Layers[i].Neurons[j].numOutArcs++;
cout << "BIND:" << y << " (" << i << "-" << j << ") -> (" << Nlayer << "-" << Layers[Nlayer].numNeurons - 1 << ") " << endl;
WeightsStamp("w");
y++;
}
x++;
}
}
}
nNeurons++;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////FUNZIONI VARIE///////////////////////////////////////////////////////////
// Derivative of the sigmoid expressed via the stored output: f'(x) = y*(1-y).
float DsigOut(int Layer, int Neuron) {
// derivative of the sigmoid function (Y*(1-Y))
// REMEMBER to add k here if you implement sigmoid steepness/sensitivity
return Layers[Layer].Neurons[Neuron].output*(1 - Layers[Layer].Neurons[Neuron].output);
}
float sigmoid(float x) { return 1 / (1 + exp(-x)); } // logistic function, R -> (0,1)
float logit(float x) { return log(x / (1 - x)); } // inverse sigmoid; requires 0 < x < 1
float gaussian(float x, float mu, float var) { return (1 / (var*sqrt(2 * M_PI)))*exp(-((pow(x - mu, 2.0)) / (2 * var))); } // Gaussian(float x, float mu, float var) — NOTE(review): the prefactor treats `var` as sigma while the exponent treats it as sigma^2; confirm which is intended
// Debug printer for the network's weights. Modes:
//   m  - print the mean of each layer's outgoing weights
//   a  - print every weight plus per-neuron learning parameters
//   w  - print every weight with its (layer,column) target reference
//   fc - print mean weights of each layer-to-layer group of connections
void WeightsStamp(string mode = "a") {
// m - print the mean weights of each layer
// a - print all network weights together with some learning parameters
// w - print all weights with the row-column reference to the target
// fc - print the means of the weight groups between pairs of layers
if (mode == "m") { // layer means
float mean;
int x;
for (int i = 0; i < numLayers() - 1; i++) {
x = 0;
mean = 0;
for (int c = 0; c < numNeurons(i); c++) {
for (int d = 0; d < numCon(i, c); d++) {
mean += getWeight(i, c, d);
x++;
}
}
mean /= x;
cout << "Layer " << i << " weights:" << mean << endl;
}
}
else if (mode == "a") { // print every weight and the neurons' parameters
for (int i = 0; i < numLayers() - 1; i++) {
for (int c = 0; c < numNeurons(i); c++) {
cout << "(" << i << "-" << c << ") output: " << getOutput(i, c) << " BPerr: " << getBPerr(i, c) << endl;
for (int d = 0; d < numCon(i, c); d++) {
cout << getWeight(i, c, d) << " (" << getDeltaWeight(i, c, d) << ") ";
}
cout << endl;
}
}
}
else if (mode == "w") { // print every weight with its target reference
for (int i = 0; i < numLayers() - 1; i++) {
for (int c = 0; c < numNeurons(i); c++) {
cout << "(" << i << "-" << c << ")" << "Con: " << numCon(i, c) << " InCon: " << numInCon(i, c) << endl;
for (int d = 0; d < numCon(i, c); d++) {
cout << getWeight(i, c, d) << " (" << getConTargetLyr(i, c, d) << "," << getConTargetCol(i, c, d) << ") ";
}
cout << endl;
}
}
}
else if (mode == "fc") { // print mean weights between layer pairs
vector<float> wLyr(nLayers - 1);
vector<int> nwLyr(nLayers - 1);
float wl = 0;
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
wLyr[Layers[i].Neurons[j].OutArcs[k].target->layer - 1] += Layers[i].Neurons[j].OutArcs[k].weight;
nwLyr[Layers[i].Neurons[j].OutArcs[k].target->layer - 1]++;
}
}
cout << "Layer " << i << " weights: ";
for (int j = 0; j < wLyr.size(); j++) {
wLyr[j] /= nwLyr[j];
if (j + 1 > i)cout << "(" << i << ", " << j + 1 << ") [" << nwLyr[j] << "] " << wLyr[j] << " ";
}
cout << endl;
fill(wLyr.begin(), wLyr.end(), 0);
fill(nwLyr.begin(), nwLyr.end(), 0);
}
}
else {
cout << "WeightsStamp() argument error!" << endl;
}
cout << endl;
}
// Applies the sigmoid activation to the output field of every neuron in
// the specified layer.
void sigLayer(int lyr) {
const int n = Layers[lyr].numNeurons;
for (int col = 0; col < n; col++) {
Layers[lyr].Neurons[col].output = sigmoid(Layers[lyr].Neurons[col].output);
}
}
// Adds each neuron's bias ("bayes") to its accumulated output in the given
// layer. When absSum is true, the absolute bias is also folded into the
// neuron's absOutSum accumulator (used by the profiler).
void bayesLayer(int lyr, bool absSum = false) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
Layers[lyr].Neurons[col].output += Layers[lyr].Neurons[col].bayes;
}
if (absSum) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
Layers[lyr].Neurons[col].absOutSum += abs(Layers[lyr].Neurons[col].bayes);
}
}
}
// Clears the accumulated output (potential) of every neuron in the network.
void resetPotential() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].output = 0;
}
}
}
// Clears every neuron's absOutSum accumulator (sum of the absolute values
// of its inputs).
void resetAbsSumPotenzial() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].absOutSum = 0;
}
}
}
// Clears every neuron's absDeltaSum accumulator.
void resetAbsSumDelta() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].absDeltaSum = 0;
}
}
}
// Clears the back-propagated error stored in every neuron.
void resetBPerr() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].BPerr = 0;
}
}
}
// Rewrites every neuron's (layer, column) coordinates so they match the
// neuron's actual position inside the Layers structure.
void resetNeuronsID() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
Layers[lyr].Neurons[col].layer = lyr;
Layers[lyr].Neurons[col].column = col;
}
}
}
// Records, as numeric references, every connection whose target neuron lies
// in the specified layer (scanning all layers before it). Used to survive
// structural changes that would invalidate the raw target pointers.
vector<conMap> saveConsTowardsLyr(int Layer) {
vector<conMap> connections;
conMap entry;
for (int lyr = 0; lyr < Layer; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
for (int a = 0; a < Layers[lyr].Neurons[col].numOutArcs; a++) {
if (Layers[lyr].Neurons[col].OutArcs[a].target->layer != Layer) continue;
entry.startLyr = lyr;
entry.startCol = col;
entry.arcRef = a;
entry.targetLyr = Layers[lyr].Neurons[col].OutArcs[a].target->layer;
entry.targetCol = Layers[lyr].Neurons[col].OutArcs[a].target->column;
connections.push_back(entry);
}
}
}
return connections;
}
void loadConsTowardsLyr(vector<conMap> con) { // ricarica i riferimenti numerici delle connesioni verso il layer specificato
for (int i = 0; i < con.size(); i++) {
Layers[con[i].startLyr].Neurons[con[i].startCol].OutArcs[con[i].arcRef].target = getTarget(con[i].targetLyr, con[i].targetCol);
}
}
// Removes every training example from the in-memory dataset.
void ClearDataset() { examples.clear(); }
//Generates a synthetic sine-based test dataset (DEBUG helper).
// nExe - number of examples; nIn / nOut - inputs and desired outputs each
// step - sine sampling step; offset - constant added to every sample
// type: 0 = constant debug data (all values 1 + offset)
//       1 = prediction (the outputs continue the input series)
//       2 = identity (the outputs repeat the inputs)
//       3 = mirror (inputs filled in reverse order)
void genTestDataset(int nExe, int nIn, int nOut, float step, int type, float offset) {
cout << "creating examples.." << endl;
examples.resize(nExe);
float x = 0, x2 = 0; // x = start phase of the current example, x2 = running phase
for (int i = 0; i < nExe; i++) {
examples[i].input.resize(nIn);
examples[i].Doutput.resize(nOut);
switch (type) {
case 0: // debug dataset
for (int j = nIn - 1; j >= 0; j--) {
examples[i].input[j] = 1 + offset;
}
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = 1 + offset;
}
break;
case 1: // prediction function
for (int j = 0; j < nIn; j++) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
case 2: // identity function
for (int j = 0; j < nIn; j++) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
x2 = x; // rewind so outputs repeat the same samples
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
case 3: // mirror function
for (int j = nIn - 1; j >= 0; j--) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
x2 = x;
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
}
}
}
void setNetMap(float max, float min) {
map.resize(Layers[nLayers - 1].numNeurons);
for (int i = 0; i < map.size(); i++) {
map[i].maxValue = max;
map[i].minValue = min;
}
}
// Maps a real-valued output back into the normalized [0,1] range used
// internally by output neuron `neur`, per its configured min/max mapping.
float reverseMap(int neur, float val) {
return (val - map[neur].minValue) / (map[neur].maxValue - map[neur].minValue);
}
// Builds the sequence {start, start+1, ..., start+in-1} and returns it in
// shuffled order — a random access table for another vector.
vector<u_int> casualVector(int in, int start = 0) {
vector<u_int> out(in);
for (int k = 0; k < in; k++) {
out[k] = k + start;
}
random_shuffle(out.begin(), out.end());
return out;
}
// Shuffles the elements of a vector in place.
// Fix: the parameter was `const&`, which cannot compile when instantiated —
// shuffling must swap elements through mutable iterators. It is now a
// mutable reference. Also replaced random_shuffle (removed in C++17) with
// std::shuffle and an explicit engine.
template<typename T, typename A>
void shackeVector(std::vector<T, A> &vec) {
std::shuffle(vec.begin(), vec.end(), std::mt19937(std::random_device{}()));
}
// Assigns a progressive global index (neurIdx) to every neuron, scanning
// the network layer by layer.
void refreshNeurIdx() {
int next = 0;
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
getTarget(lyr, col)->neurIdx = next++;
}
}
}
void datasetOffset(float offset) {
for (int i = 0; i < examples.size(); i++) {
for (int j = 0; j < examples[i].Doutput.size(); j++) {
examples[i].Doutput[j] += offset;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////ACCESSO A VARIABILI PRIVATE///////////////////////////////////////////////////
int numLayers() { return nLayers; } // number of layers in the network
int numNeurons(int Layer) { return Layers[Layer].numNeurons; } // neurons in the given layer
int numCon(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].numOutArcs; } // outgoing arcs of a neuron
int numInCon(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].numInArcs; } // incoming arcs of a neuron
int getConTargetLyr(int Layer, int Neuron, int Arc) { return (int)Layers[Layer].Neurons[Neuron].OutArcs[Arc].target->layer; } // layer of an arc's target
int getConTargetCol(int Layer, int Neuron, int Arc) { return (int)Layers[Layer].Neurons[Neuron].OutArcs[Arc].target->column; } // column of an arc's target
float getWeight(int Layer, int Neuron, int Arc) { return Layers[Layer].Neurons[Neuron].OutArcs[Arc].weight; } // weight of an arc
float getDeltaWeight(int Layer, int Neuron, int Arc) { return Layers[Layer].Neurons[Neuron].OutArcs[Arc].oldDelta; } // last weight delta applied to an arc
float getOutput(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].output; } // activation of a neuron
float getBPerr(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].BPerr; } // back-propagated error of a neuron
ptNeuron getTarget(int Layer, int Neuron) { return &(Layers[Layer].Neurons[Neuron]); } // pointer to a neuron
ptNeuron getConTarget(int Layer, int Neuron, int Conn) { return Layers[Layer].Neurons[Neuron].OutArcs[Conn].target; } // pointer to an arc's target neuron
// Returns the index, within base->OutArcs, of the arc pointing at `target`;
// prints an error and returns -1 when no such connection exists.
int getOutConTargetID(ptNeuron base, ptNeuron target) {
int id = 0;
while (id < base->numOutArcs) {
if (base->OutArcs[id].target == target) return id;
id++;
}
cout << "connessione non trovata ERRORE!" << endl;
return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////WINDOWS HIGH SPEED TIMING//////////////////////////////////////////////////////
// Windows high-resolution timing (QueryPerformanceCounter API).
BOOL WINAPI QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
BOOL WINAPI QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
// Returns the current value of the high-resolution performance counter (ticks).
inline long long PerformanceCounter() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceCounter(&li);
return li.QuadPart;
}
// Returns the performance-counter frequency in ticks per second.
inline long long PerformanceFrequency() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceFrequency(&li);
return li.QuadPart;
}
/* HOW TO USE:
long long t0 = PerformanceCounter();
//code to bench..
long long t1 = PerformanceCounter();
double elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class MLP : public Network {
protected:
//Hopfield *Supporter;
public:
float NetPerformance = 0; // average execution time in milliseconds
float NetErrPercent = 0; // average percentage error associated with the network
MLP(string filename) :Network(filename) {}; // builds the MLP by loading a Network from file
////////////////////////////////////////////////FUNZIONI CREAZIONE RETE///////////////////////////////////////////////////////
//SQUARE NETWORK CREATION
// Nlayers - number of layers; Ncolumns - neurons per hidden layer
// input / output - neurons in the first / last layer
// c - if true the weights are meant to be randomly initialized, otherwise all take initValue
// NOTE(review): when c is true, every `while (abs(initWeight) > 0.05)` guard is
// entered with initWeight == 0, so its body never executes and the weights are
// left at 0 — a do/while was presumably intended; confirm before relying on c.
void qubeNet(int Nlayers, int Ncolumns, int input, int output, bool c, float initValue = 0.01f) { //TODO add parameters for input and output neuron counts!!
// tested and working
Layers.resize(Nlayers, Layer()); // declare the number of layers
nLayers = Nlayers;
cout << "Declaring structure...." << endl;
//input layer
Layers[0].numNeurons = input;
Layers[0].Neurons.resize(input, Neuron());
nNeurons += input;
for (int j = 0; j < input; j++) {
Layers[0].Neurons[j].OutArcs.resize(Ncolumns, arc());
Layers[0].Neurons[j].numOutArcs = Ncolumns;
Layers[0].Neurons[j].layer = 0;
Layers[0].Neurons[j].column = j;
nArc += Ncolumns;
}
for (int i = 1; i < Nlayers - 1; i++) {
// hidden layers
Layers[i].numNeurons = Ncolumns;
Layers[i].Neurons.resize(Ncolumns, Neuron());
nNeurons += Ncolumns;
for (int j = 0; j < Ncolumns; j++) {
if (i < Nlayers - 2) {
Layers[i].Neurons[j].OutArcs.resize(Ncolumns, arc());
Layers[i].Neurons[j].numOutArcs = Ncolumns;
nArc += Ncolumns;
}
else {
// next-to-last layer feeds the output layer
Layers[i].Neurons[j].OutArcs.resize(output, arc());
Layers[i].Neurons[j].numOutArcs = output;
nArc += output;
}
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
//output layer
Layers[Nlayers - 1].numNeurons = output;
Layers[Nlayers - 1].Neurons.resize(output, Neuron());
nNeurons += output;
for (int j = 0; j < output; j++) {
Layers[Nlayers - 1].Neurons[j].layer = Nlayers - 1;
Layers[Nlayers - 1].Neurons[j].column = j;
}
float initWeight = initValue;
float Q = sqrt(3)*sqrt(2);
int M = 10;
cout << "initializing wheights...." << endl;
// input layer -> first hidden layer
for (int j = 0; j < input; j++) {
for (int k = 0; k < Ncolumns; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[k].target = &(Layers[1].Neurons[k]);
Layers[0].Neurons[j].OutArcs[k].weight = initWeight;
Layers[1].Neurons[k].numInArcs++;
}
}
// hidden layer -> next hidden layer
for (int i = 1; i < Nlayers - 2; i++) {
//cout << "Layer:" << i << endl;
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < Ncolumns; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[k].target = &(Layers[i + 1].Neurons[k]);
Layers[i].Neurons[j].OutArcs[k].weight = initWeight;
Layers[i + 1].Neurons[k].numInArcs++;
}
}
}
// last hidden layer -> output layer
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < output; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() / 0.7556); initWeight = (rand() % M) / Q; } }
Layers[Nlayers - 2].Neurons[j].OutArcs[k].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[Nlayers - 2].Neurons[j].OutArcs[k].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
refreshNeurIdx();
}
//FULLY-CONNECTED SQUARE NETWORK CREATION
// Every neuron is connected to ALL neurons of ALL subsequent layers.
// Nlayers - number of layers; Ncolumns - neurons per hidden layer
// input / output - neurons in the first / last layer
// c - if true the weights are meant to be randomly initialized, otherwise all take initValue
// NOTE(review): when c is true, every `while (abs(initWeight) > 0.05)` guard is
// entered with initWeight == 0, so its body never executes and the weights are
// left at 0 — a do/while was presumably intended; confirm before relying on c.
void qubeNetFC(int Nlayers, int Ncolumns, int input, int output, bool c, float initValue = 0.01f) {
//TODO add parameters for input and output neuron counts!!
// tested and working
Layers.resize(Nlayers, Layer()); // declare the number of layers
nLayers = Nlayers;
cout << "Declaring structure...." << endl;
//input layer
Layers[0].numNeurons = input;
Layers[0].Neurons.resize(input, Neuron());
nNeurons += input;
int numA = (nLayers - 2)*Ncolumns + output; // arcs leaving each input neuron
nArc += input * numA;
for (int j = 0; j < input; j++) {
Layers[0].Neurons[j].OutArcs.resize(numA, arc());
Layers[0].Neurons[j].numOutArcs = numA;
Layers[0].Neurons[j].layer = 0;
Layers[0].Neurons[j].column = j;
}
for (int i = 1; i < Nlayers - 1; i++) {
// hidden layers
numA = (Nlayers - i - 2)*Ncolumns + output; // arcs leaving each neuron of layer i
Layers[i].numNeurons = Ncolumns;
Layers[i].Neurons.resize(Ncolumns, Neuron());
nNeurons += Ncolumns;
for (int j = 0; j < Ncolumns; j++) {
if (i < Nlayers - 2) {
Layers[i].Neurons[j].OutArcs.resize(numA, arc());
Layers[i].Neurons[j].numOutArcs = numA;
nArc += numA;
}
else {
Layers[i].Neurons[j].OutArcs.resize(output, arc());
Layers[i].Neurons[j].numOutArcs = output;
nArc += output;
}
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
//output layer
Layers[Nlayers - 1].numNeurons = output;
Layers[Nlayers - 1].Neurons.resize(output, Neuron());
nNeurons += output;
for (int j = 0; j < output; j++) {
Layers[Nlayers - 1].Neurons[j].layer = Nlayers - 1;
Layers[Nlayers - 1].Neurons[j].column = j;
}
float initWeight = initValue;
float Q = sqrt(3)*sqrt(2);
int M = 10;
int arcRef = 0; // NOTE(review): unused
cout << "initializing weights...." << endl;
// initialize the input layer's weights (to every hidden layer, then to the output layer)
for (int j = 0; j < input; j++) {
for (int k = 0; k < (Nlayers - 2); k++) {
for (int n = 0; n < Ncolumns; n++) {
numA = k * Ncolumns + n;
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[numA].target = &(Layers[k + 1].Neurons[n]);
Layers[0].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[k + 1].Neurons[n].numInArcs++;
}
}
for (int n = 0; n < output; n++) {
numA = (Nlayers - 2) * Ncolumns + n;
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[numA].target = &(Layers[Nlayers - 1].Neurons[n]);
Layers[0].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[Nlayers - 1].Neurons[n].numInArcs++;
}
}
// initialize the hidden layers' weights
for (int i = 1; i < Nlayers - 2; i++) {
//cout << "Layer:" << i << endl;
for (int j = 0; j < Ncolumns; j++) {
for (int k = i + 1; k < Nlayers - 1; k++) {
for (int w = 0; w < Ncolumns; w++) {
numA = (k - i - 1)*Ncolumns + w;
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[numA].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[k].Neurons[w].numInArcs++;
}
}
for (int k = 0; k < output; k++) {
numA = (Nlayers - 2 - i)*Ncolumns + k;
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[numA].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[i].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
}
// initialize the last hidden layer's weights towards the output layer
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < output; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() / 0.7556); initWeight = (rand() % M) / Q; } }
Layers[Nlayers - 2].Neurons[j].OutArcs[k].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[Nlayers - 2].Neurons[j].OutArcs[k].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
refreshNeurIdx();
}
//CUSTOM NETWORK CREATION
//TODO also count the connections into nArc during declaration
// Ncolumns must hold one neuron count per layer; conFill in [0,1] is the
// fraction of forward neurons each neuron is randomly connected to.
void customNet(int Nlayers, vector<int> Ncolumns, float conFill) {
if (Ncolumns.size() != Nlayers) { cout << "costumNet() FAILED! parameters error" << endl; return; } //error
//declare the layers
Layers.resize(Nlayers);
nLayers = Nlayers;
//declare the neurons of each layer
for (int i = 0; i < Nlayers; i++) {
Layers[i].Neurons.resize(Ncolumns[i]);
Layers[i].numNeurons = Ncolumns[i];
nNeurons += Ncolumns[i];
for (int j = 0; j < Layers[i].numNeurons; j++) {
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
//declare each neuron's outgoing connections
for (int i = 0; i < nLayers - 1; i++) {
cout << "Layer " << i << endl;
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "Neuron " << j << endl;
int nFrontNeurons = 0; for (int g = i + 1; g < nLayers; g++) nFrontNeurons += Layers[g].numNeurons;
Layers[i].Neurons[j].numOutArcs = conFill * nFrontNeurons;
Layers[i].Neurons[j].OutArcs.resize(conFill* nFrontNeurons);
vector<u_int> OutArcAcces = casualVector(Layers[i].Neurons[j].numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
int x, y = 0;
// scatter the arcs over randomly chosen forward neurons
while (y < OutArcAcces.size()) {
x = 0;
for (int k = i + 1; k < nLayers; k++) {
for (int w = 0; w < Layers[k].numNeurons; w++) {
if (y > OutArcAcces.size() - 1) continue; // all arcs placed; NOTE(review): size()-1 underflows when numOutArcs == 0
if (NetAcces[y] == x) {
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].weight = 0.01f;
Layers[k].Neurons[w].numInArcs++;
y++; cout << y << endl; // debug progress print
}
x++;
}
}
}
}
}
refreshNeurIdx();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////STIMOLAZIONE RETE/////////////////////////////////////////////////////////////////
//information propagation procedure: loads `input` into the input layer,
//propagates layer by layer (weights, biases, sigmoid), then maps the
//normalized outputs back to their real ranges via `map`, writing them into
//`output` (and back into the output neurons themselves).
void inputNet(vector<float> &input, vector<float> &output) {
if (input.size() != Layers[0].numNeurons) { cout << "il vettore in input deve avere dim " << Layers[0].numNeurons << endl; return; }
if (output.size() != Layers[nLayers - 1].numNeurons) { cout << "il vettore in output deve avere dim " << Layers[nLayers - 1].numNeurons << endl; return; }
// load the input vector into the network
for (int i = 0; i < Layers[0].numNeurons; i++) {
Layers[0].Neurons[i].output = input[i];
}
//sigLayer(0);
// propagate the stimulus
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
Layers[i].Neurons[j].OutArcs[k].target->output += Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight;
}
}
bayesLayer(i + 1);
sigLayer(i + 1);
}
float delta;
if (map.size() == 0) { cout << "Errore ... la mappatura degli output non stata settata!!"; return; }
// map each output neuron from [0,1] back to its configured [min,max] range
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
delta = map[i].maxValue - map[i].minValue;
output[i] = ((Layers[nLayers - 1].Neurons[i].output)*delta) + map[i].minValue;
Layers[nLayers - 1].Neurons[i].output = output[i];
}
}
//runs one forward propagation while also recording, per neuron, how much
//each upstream neuron contributed (in absolute value) to its activation;
//results are written into the neurons' influenceInput vectors.
void inputNetProfiler(vector<float> &input, vector<float> &output) {
if (input.size() != Layers[0].numNeurons) { cout << "il vettore in input deve avere dim " << Layers[0].numNeurons << endl; return; }
if (output.size() != Layers[nLayers - 1].numNeurons) { cout << "il vettore in output deve avere dim " << Layers[nLayers - 1].numNeurons << endl; return; }
// load the input vector into the network
for (int i = 0; i < Layers[0].numNeurons; i++) {
Layers[0].Neurons[i].output = input[i];
//Layers[0].Neurons[i].absOutSum = abs(input[i]);
}
//sigLayer(0);
// propagate the stimulus, accumulating each target's absolute input sum
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
Layers[i].Neurons[j].OutArcs[k].target->output += Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight;
Layers[i].Neurons[j].OutArcs[k].target->absOutSum += abs(Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight);
}
}
bayesLayer(i + 1, true);
sigLayer(i + 1);
}
resetVectorsProfiler(true, false);// reset all the profiling vectors
// initialize the propagation from the first layer
int IDinf = 0;
for (int i = 0; i < Layers[1].numNeurons; i++) { // scan the neurons of the first hidden layer
for (int j = 0; j < Layers[0].numNeurons; j++) { // scan the neurons of the input layer
for (int k = 0; k < numCon(0, j); k++) { // scan the connections of the input-layer neuron
if (&Layers[1].Neurons[i] == Layers[0].Neurons[j].OutArcs[k].target) {
// influence = |contribution| / total absolute input of the target
Layers[1].Neurons[i].influenceInput[IDinf] = (abs(getTarget(0, j)->output*getTarget(0, j)->OutArcs[k].weight) / (getTarget(0, j)->OutArcs[k].target->absOutSum));
IDinf++;
}
}
}
IDinf = 0;
}
// initialize the propagation in the remaining layers
for (int t = 1; t < nLayers - 1; t++) { // TODO (translated): remove the minus one
IDinf = 0;
for (int i = 0; i < Layers[t + 1].numNeurons; i++) {
for (int j = 0; j < t + 1; j++) {
for (int w = 0; w < Layers[j].numNeurons; w++) {
for (int k = 0; k < numCon(j, w); k++) {
if (&Layers[t + 1].Neurons[i] == Layers[j].Neurons[w].OutArcs[k].target) {
Layers[t + 1].Neurons[i].influenceInput[IDinf] = (abs(getTarget(j, w)->output*getTarget(j, w)->OutArcs[k].weight) / (getTarget(j, w)->OutArcs[k].target->absOutSum));
IDinf++;
}
}
}
}
IDinf = 0;
}
}
float delta;
if (map.size() == 0) {
cout << "Errore ... la mappatura degli output non stata settata!!" << endl;
return;
}
// map each output neuron from [0,1] back to its configured [min,max] range
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
delta = map[i].maxValue - map[i].minValue;
output[i] = ((Layers[nLayers - 1].Neurons[i].output)*delta) + map[i].minValue;
Layers[nLayers - 1].Neurons[i].output = output[i];
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////FUNZIONI DI ADDESTRAMENTO MLP////////////////////////////////////////////////////////
//Back-propagation training algorithm (online updates over `examples`).
// iter - maximum number of epochs
// eps - learning rate
// beta - momentum coefficient
// ErrPercent - target mean percentage error; training stops early once reached
void BP(int iter, float eps, float beta, float ErrPercent) {
float err = 0; // error of the single neuron
float Err = 0; // squared error of the single example
float Gerr = 0; // squared error over the whole dataset
float Perr = 0; // accumulated percentage error
float delta = 0; // output range (max - min) of the current neuron
long long t0 = 0;
long long t1 = 0;
long long inT1 = 0; // NOTE(review): unused
double elapsedMilliseconds = 0;
double executionTime = 0;
double inputTime = 0;
int x = 0; // index of the example whose outputs are printed this epoch
vector<float> Out(numNeurons(nLayers - 1)); // support vector for the network output
vector<float> GerrStory; // NOTE(review): declared but never filled
for (int t = 0; t < iter; t++) {
Gerr = 0;
Perr = 0;
for (int e = 0; e < examples.size(); e++) {
t0 = PerformanceCounter();
// run the example
inputNet(examples[e].input, Out);
t1 = PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
inputTime += elapsedMilliseconds;
// compute the weight-correction deltas for the output layer
Err = 0;
for (int r = 0; r < Out.size(); r++) {
delta = map[r].maxValue - map[r].minValue;
err = Out[r] - examples[e].Doutput[r];
Perr += abs(err / examples[e].Doutput[r]) * 100; // percentage error; NOTE(review): divides by zero when the desired output is 0
Err += pow(err, 2);
if (x == e && t > -1) { // progress printout; `t > -1` is always true
cout << "es." << x << " Y" << r << " = " << Out[r] << " " << "D" << r << " = " << examples[e].Doutput[r] << " err: " << abs(Out[r] - examples[e].Doutput[r]) << endl;
if (r == Out.size() - 1) { x--; WeightsStamp("fc"); /*Sleep(5000);*/ }
if (x < 0) x = examples.size() - 1;
}
//mErr = ((err- map[r].minValue)/ delta) ;
// delta of the weight correction (sigmoid derivative in mapped [0,1] space)
Layers[nLayers - 1].Neurons[r].BPerr = reverseMap(r, err) * (reverseMap(r, Out[r]) *(1 - reverseMap(r, Out[r])));
// apply the corrections to the biases
Layers[nLayers - 1].Neurons[r].oldBayesDelta = (-eps * (Layers[nLayers - 1].Neurons[r].BPerr)) + beta * Layers[nLayers - 1].Neurons[r].oldBayesDelta;
Layers[nLayers - 1].Neurons[r].bayes += Layers[nLayers - 1].Neurons[r].oldBayesDelta;
}
// apply the corrections to the output layer's weights
for (int i = 0; i < numNeurons(nLayers - 2); i++) {
for (int j = 0; j < numCon(nLayers - 2, i); j++) {
Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta = (-eps * (Layers[nLayers - 2].Neurons[i].OutArcs[j].target->BPerr)*Layers[nLayers - 2].Neurons[i].output) + beta * Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
Layers[nLayers - 2].Neurons[i].OutArcs[j].weight += Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
}
}
// repeat the procedure for all the other layers
for (int i = nLayers - 2; i > 0; i--) { // from the next-to-last layer down to the second
// compute the weight-correction deltas for layer i-1
for (int j = 0; j < numNeurons(i); j++) {
err = 0;
for (int k = 0; k < numCon(i, j); k++) err += Layers[i].Neurons[j].OutArcs[k].target->BPerr * Layers[i].Neurons[j].OutArcs[k].weight;
Layers[i].Neurons[j].BPerr = DsigOut(i, j)*err;
// apply the corrections to the biases
Layers[i].Neurons[j].oldBayesDelta = (-eps * (Layers[i].Neurons[j].BPerr)) + beta * Layers[i].Neurons[j].oldBayesDelta;
Layers[i].Neurons[j].bayes += Layers[i].Neurons[j].oldBayesDelta;
}
// apply the corrections to the weights of layer i-1
for (int j = 0; j < numNeurons(i - 1); j++) {
for (int k = 0; k < numCon(i - 1, j); k++) {
Layers[i - 1].Neurons[j].OutArcs[k].oldDelta = (-eps * (Layers[i - 1].Neurons[j].OutArcs[k].target->BPerr)*Layers[i - 1].Neurons[j].output) + beta * Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
Layers[i - 1].Neurons[j].OutArcs[k].weight += Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
}
}
}
Gerr += Err;
resetPotential(); // clear the network's output potentials
t1 = PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
executionTime += elapsedMilliseconds;
}
Gerr /= 2;
Perr /= examples.size()*Out.size();
executionTime /= examples.size();
inputTime /= examples.size();
NetPerformance = inputTime;
NetErrPercent = Perr;
cout << "Iterazione " << t << " errore quadratico: " << Gerr << " errore percentuale medio: " << Perr << " %err exemple time: " << executionTime << " ms inputTime: " << inputTime << " ms" << endl;
//if (t > 100 && t < 200)Sleep(500);
if (Perr < ErrPercent) {
cout << "Percentuale di errore obbiettivo raggiunta!" << endl;
return;
}
}
}
//runs the back-propagation step for a single example
// NOTE(review): not implemented yet — all parameters are accepted but ignored.
void oneBP(float eps, float beta, example e) {
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////ALTRE FUNZIONI MLP///////////////////////////////////////////////////////////
void initVectorsProfiler() {
//inizializza i vettori di benchmark presenti nei neuroni
//inizializzazione vettori del primo strato
for (int j = 0; j < Layers[0].numNeurons; j++) {
Layers[0].Neurons[j].influenceOutput.resize(Layers[0].Neurons[j].numOutArcs);
fill(Layers[0].Neurons[j].influenceOutput.begin(), Layers[0].Neurons[j].influenceOutput.end(), 0);
}
for (int i = 1; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
Layers[i].Neurons[j].influenceInput.resize(Layers[i].Neurons[j].numInArcs);
Layers[i].Neurons[j].influenceOutput.resize(Layers[i].Neurons[j].numOutArcs);
// resetto tutti i vettori zero
fill(Layers[i].Neurons[j].influenceInput.begin(), Layers[i].Neurons[j].influenceInput.end(), 0);
fill(Layers[i].Neurons[j].influenceOutput.begin(), Layers[i].Neurons[j].influenceOutput.end(), 0);
}
}
//inizializzazione dei vettori dell'ultimo strato
for (int j = 0; j < Layers[nLayers - 1].numNeurons; j++) {
Layers[nLayers - 1].Neurons[j].influenceInput.resize(Layers[nLayers - 1].Neurons[j].numInArcs);
fill(Layers[nLayers - 1].Neurons[j].influenceInput.begin(), Layers[nLayers - 1].Neurons[j].influenceInput.end(), 0);
}
}
void resetVectorsProfiler(bool inInfl, bool outInfl) { //resetta a zero tutti i vettori di profilazione esclusi layer input e output
for (int i = 0; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
if (i > 0 && inInfl == true)fill(Layers[i].Neurons[j].influenceInput.begin(), Layers[i].Neurons[j].influenceInput.end(), 0);
if (i < nLayers - 1 && outInfl == true)fill(Layers[i].Neurons[j].influenceOutput.begin(), Layers[i].Neurons[j].influenceOutput.end(), 0);
}
}
}
//prints the influence of the inputs on each output
// all == false: prints only the output layer's influenceInput vectors
// all == true : prints every neuron's influenceInput vector, resolving the
//               (layer, column) of each source neuron via basePosfromtarget()
void stampInputInfluences(bool all = false) {
cout << endl;
if (all == false) {
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
cout << "Output (" << i << "): ";
for (int j = 0; j < Layers[nLayers - 1].Neurons[i].influenceInput.size(); j++) {
cout << "infl n in(" << j << "): " << Layers[nLayers - 1].Neurons[i].influenceInput[j] * 100 << "% || ";
}
cout << endl;
}
}
else {
int l = 0;
int c = 0;
for (int i = 0; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "neuron (" << i << ", " << j << ") input influence: ";
for (int k = 0; k < Layers[i].Neurons[j].influenceInput.size(); k++) {
l = basePosfromtarget(getTarget(i, j), k)->layer;
c = basePosfromtarget(getTarget(i, j), k)->column;
cout << " (" << l << ", " << c << "): " << Layers[i].Neurons[j].influenceInput[k] * 100 << "% || ";// does not show the correlated neuron, only the positional arc number within the neuron
}
cout << endl;
}
}
}
}
//prints the propagation of the output errors onto each input
// all == false: prints only the input layer's influenceOutput vectors
// all == true : prints every neuron's influenceOutput vector
void stampOutputErrorPropagation(bool all = false) {
cout << endl;
if (all == false) {
for (int i = 0; i < Layers[0].numNeurons; i++) {
cout << "input (" << i << ") BPerrore : ";
for (int j = 0; j < Layers[0].Neurons[i].influenceOutput.size(); j++) {
cout << " (" << j << "): " << Layers[0].Neurons[i].influenceOutput[j] * 100 << "% || ";
}
cout << endl;
}
}
else {
for (int i = 0; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "neuron (" << i << ", " << j << "): ";
for (int k = 0; k < Layers[i].Neurons[j].influenceOutput.size(); k++) {
cout << "BPerr n out(" << k << "): " << Layers[i].Neurons[j].influenceOutput[k] * 100 << "% || "; // does not show the correlated neuron, only the positional arc number within the neuron
}
cout << endl;
}
}
}
}
// Given a target neuron and an index k into its influenceInput vector,
// returns a pointer to the k-th neuron (in network scan order) that owns an
// arc into `target`, or nullptr when k is out of range.
ptNeuron basePosfromtarget(ptNeuron target, int k) {
int found = -1;
for (int lyr = 0; lyr < target->layer; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
for (int a = 0; a < numCon(lyr, col); a++) {
if (target == getConTarget(lyr, col, a)) {
found++;
if (found == k) return getTarget(lyr, col);
}
}
}
}
return nullptr;
}
// Given a neuron and the index of one of its outgoing arcs, returns the
// index of that connection inside the target neuron's influenceInput
// vector, or -1 when the connection cannot be found.
int idBaseConReftoTargetInfl(ptNeuron base, int arc) {
ptNeuron target = base->OutArcs[arc].target;
int pos = -1;
for (int lyr = 0; lyr < target->layer; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
for (int a = 0; a < numCon(lyr, col); a++) {
if (target == getConTarget(lyr, col, a)) {
pos++;
if (base == getTarget(lyr, col)) return pos;
}
}
}
}
return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class Hopfield : public Network {
public:
MLP* mlp; // pointer to the MLP network being supported
vector<interArc> binds; // bindings between the MLP and the Hopfield net
// Loads the network description from `filename`; when an MLP target is
// supplied, stores it and builds the support net bound to it.
Hopfield(string filename, MLP *target = NULL) :Network(filename) {
if (target != NULL) {
mlp = target;
supportNet();
}
}
////////////////////////////////////////////////FUNZIONI CREAZIONE RETE Hopfield///////////////////////////////////////////////
//Ring network //REMEMBER: the weights are automatically set to 1 TODO make this controllable via parameter
// Built like the fully-connected square nets, but with connections between
// neurons of the same layer as well. Ncolumns holds one neuron count per
// layer; conFill in [0,1] is the fraction of candidate targets connected.
void toroidNet(int Nlayers, vector<int> Ncolumns, float conFill) {
if (Ncolumns.size() != Nlayers) { cout << "ringNet() FAILED! parameters error" << endl; return; } //error
//declare the layers
Layers.resize(Nlayers);
nLayers = Nlayers;
//declare the neurons of each layer
for (int i = 0; i < Nlayers; i++) {
Layers[i].Neurons.resize(Ncolumns[i]);
Layers[i].numNeurons = Ncolumns[i];
nNeurons += Ncolumns[i];
for (int j = 0; j < Layers[i].numNeurons; j++) {
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
//declare each neuron's outgoing connections
for (int i = 0; i < nLayers; i++) {
//cout << "Layer " << i << endl;
for (int j = 0; j < Layers[i].numNeurons; j++) {
//cout << "Neuron " << j << endl;
int nFrontNeurons = 0; for (int g = i; g < nLayers; g++) nFrontNeurons += Layers[g].numNeurons;
Layers[i].Neurons[j].numOutArcs = conFill * nFrontNeurons;
Layers[i].Neurons[j].OutArcs.resize(conFill* nFrontNeurons);
vector<u_int> OutArcAcces = casualVector(Layers[i].Neurons[j].numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
int x, y = 0;
// scatter the arcs over random same-layer and forward targets
while (y < OutArcAcces.size()) {
x = 0;
for (int k = i; k < nLayers; k++) {
for (int w = 0; w < Layers[k].numNeurons; w++) {
if (y > OutArcAcces.size() - 1) continue;
if (NetAcces[y] == x) {
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].weight = 1.0f;
Layers[k].Neurons[w].numInArcs++;
y++; cout << y << endl;
}
x++;
}
}
}
//QUICK FIX: remove each neuron's connections to itself
// NOTE(review): deleteArc mutates OutArcs while this loop iterates over it — verify it cannot skip entries
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == i) {
if (Layers[i].Neurons[j].OutArcs[k].target->column == j) {
deleteArc(i, j, i, j);
}
}
}
}
}
}
//Genera una rete di supporto all'apprendimento su misura per una mlp
void supportNet() {
//creo la rete di supporto
nLayers = 1;
nNeurons = mlp->nNeurons;
nArc = mlp->nArc;
Layers.resize(1);
Layers[0].Neurons.resize(nNeurons);
Layers[0].numNeurons = nNeurons;
//collego la rete di supporto alla mlp
//e copio le connessioni dalla mlp sulla hopfield
binds.resize(mlp->nNeurons);
int bindPos = 0;
for (int i = 0; i < mlp->nLayers; i++) {
for (int j = 0; j < mlp->numNeurons(i); j++) {
binds[bindPos].base = getTarget(0, bindPos);
binds[bindPos].base->layer = 0;
binds[bindPos].base->column = bindPos;
binds[bindPos].target = mlp->getTarget(i, j);
binds[bindPos].base->numOutArcs = binds[bindPos].target->numOutArcs;
binds[bindPos].base->numInArcs = binds[bindPos].target->numInArcs;
binds[bindPos].base->OutArcs.resize(binds[bindPos].target->numOutArcs);
bindPos++;
}
}
for (int i = 0; i < binds.size(); i++) {
for (int k = 0; k < binds[i].target->numOutArcs; k++) {
int TargetPos = searchFromTarget(binds[i].target->OutArcs[k].target);
if (TargetPos != -1) {
binds[i].base->OutArcs[k].target = binds[TargetPos].base;
binds[i].base->OutArcs[k].weight = 1.0f;
}
}
}
}
//Genera una rete con connessioni completamente caotiche
void caoticNet() {} // TODO da sviluppare
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////FUNZIONI DI ADDESTRAMENTO//////////////////////////////////////////////////////
//la funzione considera i parametri della rete mlp associta per addestrare la memoria associativa
void trainSupportNet(float Wi, float We, int g) {
float infl = 0, BPerr = 0;
float Dinfl = 0, DbpErr = 0;
for (int i = 0; i < mlp->nLayers; i++) {
for (int j = 0; j < mlp->numNeurons(i); j++) {
for (int k = 0; k < mlp->numCon(i, j); k++) {
BPerr = mlp->getTarget(i, j)->influenceOutput[k];
infl = mlp->getTarget(i, j)->OutArcs[k].target->influenceInput[mlp->idBaseConReftoTargetInfl(mlp->getTarget(i, j), k)];
Dinfl = DeltaHW(Wi, infl, g);
DbpErr = DeltaHW(We, BPerr, g);
binds[searchFromTarget(mlp->getTarget(i, j))].base->OutArcs[k].weight += Dinfl + DbpErr;
}
}
}
}
//funzione che taglia gli archi con il peso sotto una certa soglia
void cutMadArc(int maxCut, int alfa) {
//!!!la procedura supporta soltanto reti ad anello su un solo layer!!!
//maxCut rappresenta il numero massimo di rami da tagliare in questa esecuzione
//alfa rappresenta il coefficiente di soglia percentuale sotto il quale il ramo puo essere tagliato (la soglia una percentuale)
float Wsum = 0;
u_int Wn = 0;
float Wmax = std::numeric_limits<float>::min();
float Wmin = std::numeric_limits<float>::max();
for (int i = 0; i < numNeurons(0); i++) {
for (int j = 0; j < numCon(0, i); j++) {
Wsum += getTarget(0, i)->OutArcs[j].weight;
if (getTarget(0, i)->OutArcs[j].weight > Wmax) { Wmax = getTarget(0, i)->OutArcs[j].weight; }
if (getTarget(0, i)->OutArcs[j].weight < Wmin) { Wmax = getTarget(0, i)->OutArcs[j].weight; }
Wn++;
}
}
nArc = Wn;
mlp->nArc = nArc;
float Wmean = Wsum / Wn;
float treashold = ((Wmax - Wmin)*alfa) + Wmin; // alfa = 0 -> Treashold = Wmin , alfa = 1 -> Treashold = Wmax
int Tcol = 0, TmlpL = 0, TmlpC = 0, mlpL = 0, mlpC = 0;
vector <u_int> acs = casualVector(nNeurons - mlp->numNeurons(mlp->nLayers - 1));
for (int i = 0; i < acs.size(); i++) {
for (int j = 0; j < getTarget(0, acs[i])->numOutArcs; j++) {
if (getTarget(0, acs[i])->OutArcs[j].weight < treashold) { // condizione sufficiente all'eliminazione
Tcol = getTarget(0, acs[i])->OutArcs[j].target->column; //colonna del neurone Hopfield da eliminare
TmlpL = binds[serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)].target->layer; // numLayer del neurone mlp da eliminare
TmlpC = binds[serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)].target->column; // numCol del neurone mlp da eliminare
mlpL = binds[serchFromBase(getTarget(0, acs[i]))].target->layer; // numLayer del neurone mlp contenente la connessione
mlpC = binds[serchFromBase(getTarget(0, acs[i]))].target->column; // numCol del neurone mlp contenente la connessione
// procedura di eliminazione arco
if (getTarget(0, acs[i])->OutArcs[j].target->numInArcs == 1) { // se il neurone obbiettivo ha solo l'arco da eliminare elimino anche il corrispondente elemento bind
//binds.erase(binds.begin() + serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)); //cerco e elimino l'elemento bind dei due neuroni
//deleteArc(0, acs[i], 0, Tcol); // elimino l'arco nella rete Hopfield
//mlp->deleteArc(mlpL, mlpC, TmlpL, TmlpC); //elimino l'arco nella rete mlp
//TODO utilizza funzioni apposite per l'eliminazione dei neuroni delle due reti
nArc++;
}
else { //altrimenti elimino semplicemente l'arco nelle due reti
deleteArc(0, acs[i], 0, Tcol); // elimino l'arco nella rete Hopfield
mlp->deleteArc(mlpL, mlpC, TmlpL, TmlpC); //elimino l'arco nella rete mlp
}
nArc--;
mlp->nArc = nArc;
maxCut--;
if (maxCut < 1) return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////FUNZIONI MODIFICA RETE///////////////////////////////////////////////////////////
void HdeleteArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer == targetLayer && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column == targetColumn) {
//modifico i parametri della rete relazionati a tale arco
Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs--;
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs == 0 && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer != 0) {
deleteNeuron(Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer, Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + i);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
return;
}
}
cout << "errore: arco (" << Nlayer << "-" << Ncolumn << ") -> (" << targetLayer << "-" << targetColumn << ") non trovato, eliinazione fallita" << endl;
}
void HdeleteArcByRef(int Nlayer, int Ncolumn, arc *targetArc) { //elimina un arco con gli indici della base e il puntatore alla connsessione
//modifico i parametri della rete relazionati a tale arco
targetArc->target->numInArcs--;
if (targetArc->target->influenceInput.size() > 0)targetArc->target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (targetArc->target->numInArcs == 0) {
deleteNeuron(targetArc->target->layer, targetArc->target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + getOutConTargetID(getTarget(Nlayer, Ncolumn), targetArc->target));
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
}
void HdeleteNeuron(int Nlayer, int Ncolumn) {
// elimino l'arco dai neuroni che puntano al nodo da eliminare e
// decremento il numero di archi in output hai neuroni con come target il neurone da eliminare
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == Nlayer && Layers[i].Neurons[j].OutArcs[k].target->column == Ncolumn) {
deleteArcByRef(i, j, &Layers[i].Neurons[j].OutArcs[k]);
}
}
}
}
// eliminio gli archi del neurone da eliminare
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
deleteArcByRef(Nlayer, Ncolumn, &Layers[Nlayer].Neurons[Ncolumn].OutArcs[i]);
}
// decremento l'indice colonna dei neuroni successivi nello stesso layer
for (int i = Ncolumn + 1; i < Layers[Nlayer].numNeurons; i++) Layers[Nlayer].Neurons[i].column--;
// elimino il neurone
Layers[Nlayer].Neurons.erase(Layers[Nlayer].Neurons.begin() + Ncolumn);
Layers[Nlayer].numNeurons--;
nNeurons--;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////ALTRE FUNZIONI/////////////////////////////////////////////////////////////////
int serchFromBase(ptNeuron base) {
for (int i = 0; i < binds.size(); i++) {
if (base == binds[i].base) {
return i;
}
}
return -1;
}
int searchFromTarget(ptNeuron target) {
for (int i = 0; i < binds.size(); i++) {
if (target == binds[i].target) {
return i;
}
}
return -1;
}
//funzione per il condizionamento dei valori dei vettori di retropropagazione
float DeltaHW(float W, float x, float g) {
return W * powf((2 * x - 1), (1 + 2 * g));
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
// Coordinates structural learning: standard back-propagation with momentum on
// the MLP, training of the Hopfield support net on the propagated influence
// vectors, and (after cutStart sweeps) pruning of the weakest arcs.
class StructuralLearning {
private:
    MLP * mlp; // mlp net
    Hopfield* hpd; // Hopfield support net
public:
    StructuralLearning(MLP* ptMlp, Hopfield* ptHpd) { // constructor
        mlp = ptMlp;
        hpd = ptHpd;
        mlp->initVectorsProfiler();
    }
    // Runs back-propagation with structural pruning.
    // iter: max sweeps over the dataset; eps: learning rate; beta: momentum;
    // ErrPercent: target mean percent error that stops training;
    // Wi, We, g: parameters forwarded to Hopfield::trainSupportNet;
    // cutStart: sweep index after which pruning begins; alfa: cut-threshold coefficient;
    // SmaxErr: percent error above which pruning aborts; maxWloss: max % of weights that may be lost.
    void StructuralBP(int iter, float eps, float beta, float ErrPercent, float Wi, float We, int g, int cutStart, float alfa, float SmaxErr, float maxWloss) {
        int nWt0 = mlp->nArc; // number of weights at time t0
        int nWtn = 0; // number of weights remaining at the i-th iteration
        float WdelP = 0; // percentage of weights removed since the start of training
        float err = 0; // error of the single neuron
        float Err = 0; // squared error of the single example
        float Gerr = 0; // squared error of the whole dataset
        float Perr = 0;
        float delta = 0;
        long long t0 = 0;
        long long t1 = 0;
        long long inT1 = 0;
        double elapsedMilliseconds = 0;
        double executionTime = 0;
        double inputTime = 0;
        int x = 0;
        vector<float> Out(mlp->numNeurons(mlp->nLayers - 1)); // net output support vector
        vector<float> GerrStory;
        for (int t = 0; t < iter; t++) {
            Gerr = 0;
            Perr = 0;
            for (int e = 0; e < mlp->examples.size(); e++) {
                t0 = mlp->PerformanceCounter();
                // run the example
                mlp->inputNetProfiler(mlp->examples[e].input, Out);
                t1 = mlp->PerformanceCounter();
                elapsedMilliseconds = ((t1 - t0) * 1000.0) / mlp->PerformanceFrequency();
                inputTime += elapsedMilliseconds;
                // compute the weight-variation delta for the output layer
                Err = 0;
                for (int r = 0; r < Out.size(); r++) {
                    delta = mlp->map[r].maxValue - mlp->map[r].minValue;
                    err = Out[r] - mlp->examples[e].Doutput[r];
                    Perr += abs(err / mlp->examples[e].Doutput[r]) * 100; // percent error
                    Err += pow(err, 2);
                    if (x == e && t > -1) { // debug dump: prints one rotating example per sweep
                        cout << endl << "es." << x << " Y" << r << " = " << Out[r] << " " << "D" << r << " = " << mlp->examples[e].Doutput[r] << " err: " << abs(Out[r] - mlp->examples[e].Doutput[r]) << endl;
                        if (r == Out.size() - 1) {
                            x--;
                            mlp->WeightsStamp("fc");
                            /*mlp->stampInputInfluences(true);
                            mlp->stampOutputErrorPropagation(true);*/ // errors back-propagated in the previous iteration
                            /*Sleep(5000);*/
                        }
                        if (x < 0) x = mlp->examples.size() - 1;
                    }
                    mlp->resetAbsSumDelta();
                    //mErr = ((err- map[r].minValue)/ delta) ;
                    // compute the weight-variation delta
                    mlp->Layers[mlp->nLayers - 1].Neurons[r].BPerr = mlp->reverseMap(r, err) * (mlp->reverseMap(r, Out[r]) *(1 - mlp->reverseMap(r, Out[r])));
                    // apply the corrections to the biases
                    mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta = (-eps * (mlp->Layers[mlp->nLayers - 1].Neurons[r].BPerr)) + beta * mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta;
                    mlp->Layers[mlp->nLayers - 1].Neurons[r].bayes += mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta;
                }
                // apply the corrections to the output-layer weights
                for (int i = 0; i < mlp->numNeurons(mlp->nLayers - 2); i++) {
                    for (int j = 0; j < mlp->numCon(mlp->nLayers - 2, i); j++) {
                        mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta = (-eps * (mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].target->BPerr) * mlp->Layers[mlp->nLayers - 2].Neurons[i].output) + beta * mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
                        mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].weight += mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
                        mlp->Layers[mlp->nLayers - 2].Neurons[i].absDeltaSum += abs(mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta);
                    }
                }
                // repeat the procedure for all the other layers
                for (int i = mlp->nLayers - 2; i > 0; i--) { // from the second-to-last layer down to the second
                    // compute the weight-variation delta for layer i-1
                    for (int j = 0; j < mlp->numNeurons(i); j++) {
                        err = 0;
                        for (int k = 0; k < mlp->numCon(i, j); k++) err += mlp->Layers[i].Neurons[j].OutArcs[k].target->BPerr * mlp->Layers[i].Neurons[j].OutArcs[k].weight;
                        mlp->Layers[i].Neurons[j].BPerr = mlp->DsigOut(i, j)*err;
                        // apply the corrections to the biases
                        mlp->Layers[i].Neurons[j].oldBayesDelta = (-eps * (mlp->Layers[i].Neurons[j].BPerr)) + beta * mlp->Layers[i].Neurons[j].oldBayesDelta;
                        mlp->Layers[i].Neurons[j].bayes += mlp->Layers[i].Neurons[j].oldBayesDelta;
                    }
                    // apply the corrections to the weights of layer i-1
                    for (int j = 0; j < mlp->numNeurons(i - 1); j++) {
                        for (int k = 0; k < mlp->numCon(i - 1, j); k++) {
                            mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta = (-eps * (mlp->Layers[i - 1].Neurons[j].OutArcs[k].target->BPerr) * mlp->Layers[i - 1].Neurons[j].output) + beta * mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
                            mlp->Layers[i - 1].Neurons[j].OutArcs[k].weight += mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
                            mlp->Layers[i - 1].Neurons[j].absDeltaSum += abs(mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta);
                        }
                    }
                }
                mlp->resetVectorsProfiler(false, true);
                // compute the influence vectors of the error back-propagated from each output
                for (int i = mlp->nLayers - 2; i >= 0; i--) {
                    for (int j = 0; j < mlp->numNeurons(i); j++) {
                        for (int k = 0; k < mlp->numCon(i, j); k++) {
                            mlp->getTarget(i, j)->influenceOutput[k] += abs((mlp->getTarget(i, j)->OutArcs[k].oldDelta) / (mlp->getTarget(i, j)->absDeltaSum));
                        }
                    }
                }
                /////////////////////////////////// section of the algorithm where the net has the propagation vectors loaded /////////////////////////
                hpd->trainSupportNet(Wi, We, g);
                ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
                Gerr += Err;
                mlp->resetPotential(); // clear the net's output potentials
                mlp->resetAbsSumPotenzial(); // clear the absolute-value sums of the connection contributions to each neuron's output
                t1 = mlp->PerformanceCounter();
                elapsedMilliseconds = ((t1 - t0) * 1000.0) / mlp->PerformanceFrequency();
                executionTime += elapsedMilliseconds;
            }
            ///////////////////////////////////// arc elimination procedure ////////////////////////////////////////////////////
            if (t > cutStart) {
                if (t == cutStart + 1) nWt0 = mlp->nArc;
                WdelP = 1.0f - (mlp->nArc / (float)nWt0);
                WdelP = WdelP * 100.0f;
                if (mlp->NetErrPercent > SmaxErr || WdelP >= maxWloss) { // stop pruning when the error or the weight loss is too high
                    if (mlp->NetErrPercent > SmaxErr) cout << "error reached: " << mlp->NetErrPercent << endl;
                    if (WdelP >= maxWloss) cout << "weights lossed: " << WdelP << "%" << endl;
                    return;
                }
                hpd->cutMadArc(1, alfa);
            }
            //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
            Gerr /= 2;
            Perr /= mlp->examples.size()*Out.size();
            executionTime /= mlp->examples.size();
            inputTime /= mlp->examples.size();
            mlp->NetPerformance = inputTime;
            mlp->NetErrPercent = Perr;
            cout << "Iterazione " << t << " errore quadratico: " << Gerr << " errore percentuale medio: " << Perr << " %err" << endl
                << "exemple time: " << executionTime << " ms inputTime: " << inputTime << " ms" << endl
                << "Net integrity:" << (mlp->nArc / (float)nWt0) * 100 << "% arcs: " << mlp->nArc << endl;
            //if (t > 100 && t < 200)Sleep(500);
            if (Perr < ErrPercent) { // target mean percent error reached
                cout << "Percentuale di errore obbiettivo raggiunta!" << endl;
                return;
            }
        }
    }
};
////////////////////////////////////////////////////////////////CUDA Kernels//////////////////////////////////////////////////////////
// Zeroes a single float variable living in device memory (launched <<<1,1>>>).
__global__ void CUDAresetVar(float *val) {
    val[0] = 0.0f;
}
// Applies the back-propagation weight correction to every arc of the net.
// One thread per arc: weights[i] += -eps * (error of the arc's head neuron)
//                                       * (output of the arc's tail neuron).
// ArcIn[i] is the head (target) neuron index and ArcOut[i] the tail (source)
// neuron index, as loaded by CUDAcore::cudaNetCopyMLP.
__global__ void CUDAapplyWeightCorrections(float eps, float *NeuronOut, float *BPerr, float *weights, int *ArcIn, int *ArcOut, int nArcs) {
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nArcs) { // bounds guard: the grid may be larger than nArcs
        weights[i] += -eps * BPerr[ArcIn[i]] * NeuronOut[ArcOut[i]];
    }
}
// Applies the bias correction to every neuron in the inclusive index range
// [startN, endN]: Bayes[i] -= eps * BPerr[i].
__global__ void CUDAapplyBayesCorrections(float eps, float *BPerr, float *Bayes, int startN, int endN) {
    const unsigned int idx = startN + blockIdx.x * blockDim.x + threadIdx.x;
    if (idx > endN) return; // bounds guard for the inclusive range
    Bayes[idx] -= eps * BPerr[idx];
}
//applies the point derivative of the sigmoid to each neuron's sum of weighted back-propagated errors
//DEPRECATED!!!
/*__global__ void CUDAapplayDsigToBPerr(float *NeuronOut, float *BPerr, int nNeuron) {
unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < nNeuron) {
BPerr[i] *= NeuronOut[i] * (1 - NeuronOut[i]);
}
}*/
// Back-propagates the error through the net: for every arc in the inclusive
// range [startA, endA], the error of the arc's head neuron (ArcIn) scaled by
// the arc weight is accumulated into the tail neuron's (ArcOut) error.
__global__ void CUDAPropagationErr(float *BPerr, float *weights, float *NeuronOut, int *ArcIn, int *ArcOut, int startA, int endA) {
    unsigned int i = startA + (blockIdx.x * blockDim.x) + threadIdx.x;
    // back-propagate the error from the following neurons
    if (i <= endA) {
        //BPerr[ArcOut[i]] += BPerr[ArcIn[i]] * weights[i];
        // atomic: several arcs may share the same tail neuron ArcOut[i]
        atomicAdd(&BPerr[ArcOut[i]], BPerr[ArcIn[i]] * weights[i]);
    }
}
// Multiplies each neuron's back-propagated error by the point derivative of
// the sigmoid evaluated at the neuron's output: s'(x) = s(x) * (1 - s(x)).
// Operates on the inclusive neuron index range [startN, endN].
__global__ void CUDAoutDiff(float *BPerr, float *NeuronOut, int startN, int endN) {
    // unsigned index for consistency with every other kernel in this file
    unsigned int i = startN + (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i <= endN) {
        BPerr[i] *= NeuronOut[i] * (1 - NeuronOut[i]);
    }
}
// Computes the error of the output-layer neurons and accumulates the percent
// error of the net into *MeanErr.
// OutputRef: index of the first output neuron inside NeuronOut/BPerr;
// numNeurons: callers pass the total NeuronOut size (see hostCUDAtrainingNet),
//             so `i < numNeurons` bounds the scan to the last neurons;
// inputN: number of input neurons, skipped to reach the example's desired outputs;
// exampleRef: offset of the current example in the flat examples vector;
// mapMaxOut/mapMinOut: per-output de-normalization ranges.
__global__ void CUDAoutputErr(float *NeuronOut, int OutputRef, int numNeurons, int inputN, float *BPerr, float *examples, int exampleRef, float *mapMaxOut, float *mapMinOut, float *MeanErr) {
    unsigned int i = (OutputRef) + (blockIdx.x * blockDim.x) + threadIdx.x; // index into: NeuronOut, BPerr
    unsigned int e = (exampleRef + inputN) + (blockIdx.x * blockDim.x) + threadIdx.x; // index into: examples
    unsigned int m = (blockIdx.x * blockDim.x) + threadIdx.x; // index into: mapMaxOut, mapMinOut
    //if (i == 0) *MeanErr = 0;
    if (i < numNeurons) {
        float delta = mapMaxOut[m] - mapMinOut[m];
        // output error = (actual - normalized desired) * sigmoid derivative
        BPerr[i] = (NeuronOut[i] - ((examples[e] - mapMinOut[m]) / delta)) * NeuronOut[i] * (1 - NeuronOut[i]); // formula valid only for output neurons
        //atomicAdd(MeanErr, (abs((((NeuronOut[i] * delta) + mapMinOut[m]) - examples[e]) / examples[e]))*100.0f);
        // accumulate this output's percent error (atomic: one accumulator shared by all threads)
        atomicAdd(MeanErr, abs((((NeuronOut[i] * delta) + mapMinOut[m]) - examples[e])/ examples[e]) * 100.0f);
        //*MeanErr += abs((examples[e] - ((NeuronOut[i] * delta) + mapMinOut[m])) / examples[e]); // compute the percent error of the single output and sum it
    }
}
// Sets every element of a device vector of length `size` to zero.
__global__ void CUDAresetVector(float *vect, int size) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        vect[idx] = 0.0f;
    }
}
// Loads one example's input values into the outputs of the input-layer
// neurons; exampleRef is the example's offset inside the flat examples vector.
__global__ void CUDAsetInput(float *NeuronOut, int inputN, int exampleRef, float *example) {
    const unsigned int n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n < inputN) {
        NeuronOut[n] = example[exampleRef + n];
    }
}
// Loads a stand-alone input vector (no example offset) into the outputs of
// the input-layer neurons.
__global__ void CUDAsetSingleInput(float *NeuronOut, int inputN, float *example) {
    const unsigned int n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n < inputN) {
        NeuronOut[n] = example[n];
    }
}
// Applies the logistic sigmoid to the neuron potentials in the inclusive
// index range [start, end].
__global__ void CUDAsigLayer(float *NeuronOut, int start, int end) {
    const unsigned int n = start + blockIdx.x * blockDim.x + threadIdx.x;
    if (n > end) return; // bounds guard for the inclusive range
    NeuronOut[n] = 1.0f / (1.0f + expf(-NeuronOut[n]));
}
// Adds the bias contribution to each neuron's output in the inclusive index
// range [start, end].
__global__ void CUDAbayesInput(float *NeuronOut, float *Bayes, int start, int end) {
    const unsigned int n = start + blockIdx.x * blockDim.x + threadIdx.x;
    if (n <= end) {
        NeuronOut[n] += Bayes[n];
    }
}
// Propagates the activations forward: for every arc in the inclusive range
// [start, end], the tail neuron's output (ArcOut) scaled by the arc weight is
// accumulated into the head neuron's potential (ArcIn).
// TODO replace atomicAdd with a sum reduction (using atomicAdd as few times as possible per neuron)
__global__ void CUDAlayerInput(float *weights, int *ArcIn, int *ArcOut, float *NeuronOut, int start, int end) {
    unsigned int i = start + (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i <= end) {
        atomicAdd(&NeuronOut[ArcIn[i]], NeuronOut[ArcOut[i]] * weights[i]); // blocking add: prevents other threads from overwriting the value until the operation completes
        //printf("Neurone %d ( %f ) += Neuron %d ( %f ) * peso ( %f ) \n", ArcIn[i], NeuronOut[ArcIn[i]], ArcOut[i], NeuronOut[ArcOut[i]], weights[i]);
        //NeuronOut[ArcIn[i]] += NeuronOut[ArcOut[i]] * weights[i];
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// GPU interfacing layer: holds the net flattened into parallel host vectors
// plus the struct of matching device pointers.
class CUDAcore {
    // GPU interfacing API
private:
public:
    //TODO add the bias vector and the related apply/correct functions
    hipDeviceProp_t prop; //Device specs struct
    int GpuID = 0;
    // struct holding the pointers to the GPU memory areas that contain the net parameters
    struct devNetParams {
        float *weights = 0;
        int *ArcIn = 0;
        int *ArcOut = 0;
        float *examples = 0;
        float *NeuronOut = 0;
        float *Bayes = 0;
        float *BPerr = 0;
        float *mapMaxOut = 0;
        float *mapMinOut = 0;
        int *NeurInLyr = 0;
        int *priority = 0;
        float *MeanErr = 0;
        float *InputRT = 0;
    }gpuNetParams;
    vector<float> weights; // net weights, one entry per arc
    vector<int> ArcIn; // target (head) neuron index of the n-th arc
    vector<int> ArcOut; // base (tail) neuron index of the n-th arc
    vector<float> NeuronOut; // output of each neuron
    vector<float> Bayes; // bias of each neuron
    vector<float> BPerr; // back-propagated error of each neuron
    vector<float> mapMaxOut; // maximum value of each net output
    vector<float> mapMinOut; // minimum value of each net output
    vector<int> priority; // thread synchronization points (index of the last arc of each layer, see cudaNetCopyMLP)
    vector<int> NeurInLyr; // index of the last neuron of each layer
    vector<float>examples; // flattened examples: inputN inputs then outputN desired outputs per example
    float MeanErr = 0; // mean percent error of the net
    int inputN, outputN; // sizes of an example's sections; (inputN + outputN) is the stride in `examples`
    // Binds this core to GPU number nGpu and queries its hardware properties.
    CUDAcore(int nGpu) {
        GpuID = nGpu;
        checkCuda(hipGetDeviceProperties(&prop, nGpu)); // fills the `prop` struct with the characteristics of the GPU with the given index
    }
/*to convert vector objects into raw arrays:
std::vector<double> v;
double* a = &v[0];
*/
    // Flattens the MLP into the GPU-friendly parallel vectors: one entry per
    // arc in weights/ArcIn/ArcOut, one entry per neuron in Bayes (NeuronOut and
    // BPerr are only sized here), plus the per-layer end indices in NeurInLyr
    // (last neuron of each layer) and priority (last arc of each layer).
    // NOTE(review): assumes each neuron's neurIdx matches the layer/column scan
    // order used here — verify against where neurIdx is assigned.
    void cudaNetCopyMLP(MLP *pt) {
        cout << "copying the net into CUDAcore.." << endl;
        weights.resize(pt->nArc);
        ArcIn.resize(pt->nArc);
        ArcOut.resize(pt->nArc);
        NeuronOut.resize(pt->nNeurons);
        Bayes.resize(pt->nNeurons);
        BPerr.resize(pt->nNeurons);
        priority.resize(pt->nLayers + 1);
        NeurInLyr.resize(pt->nLayers + 1);
        mapMaxOut.resize(pt->map.size());
        mapMinOut.resize(pt->map.size());
        inputN = pt->numNeurons(0);
        outputN = pt->numNeurons(pt->nLayers - 1);
        int NeuronIdx = 0;
        int ArcIdx = 0;
        vector<int> neurons(pt->nLayers);
        // load the net-output mapping vector
        for (int i = 0; i < pt->map.size(); i++) {
            mapMaxOut[i] = pt->map[i].maxValue;
            mapMinOut[i] = pt->map[i].minValue;
        }
        NeurInLyr[0] = -1; // set the first value
        priority[0] = -1; // set the first value
        // load the net parameters
        for (int i = 0; i < pt->nLayers; i++) {
            for (int j = 0; j < pt->numNeurons(i); j++) {
                Bayes[NeuronIdx] = pt->getTarget(i, j)->bayes;
                for (int k = 0; k < pt->numCon(i, j); k++) {
                    weights[ArcIdx] = pt->getTarget(i, j)->OutArcs[k].weight;
                    ArcIn[ArcIdx] = pt->getTarget(i, j)->OutArcs[k].target->neurIdx;
                    ArcOut[ArcIdx] = pt->getTarget(i, j)->neurIdx;
                    ArcIdx++;
                }
                NeuronIdx++;
            }
            NeurInLyr[i + 1] = NeuronIdx - 1; // save the index of the last neuron of the current layer
            priority[i + 1] = ArcIdx - 1; // save the index of the last arc of the current layer
        }
    }
void cudaNetPasteMLP(MLP *pt) {
int idx = 0;
int Nidx = 0;
for (int i = 0; i < pt->nLayers; i++) {
for (int j = 0; j < pt->numNeurons(i); j++) {
pt->getTarget(i, j)->bayes = Bayes[Nidx++];
for (int k = 0; k < pt->numCon(i, j); k++) {
pt->getTarget(i, j)->OutArcs[k].weight = weights[idx++];
}
}
}
}
    // Placeholder: flattening a Hopfield net for the GPU is not implemented yet.
    void cudaNetCopyHopfield(Hopfield* pt) {
    }
    // Flattens the MLP's training examples into the single `examples` vector:
    // for each example, its input values followed by its desired output values,
    // so one example occupies (inputN + outputN) consecutive entries.
    void cudaNetCopyExamples(MLP *pt) {
        cout << "copying example into CUDAcore" << endl;
        examples.resize(pt->examples.size()*(pt->numNeurons(0) + pt->numNeurons(pt->nLayers - 1)));
        int idx = 0;
        for (int i = 0; i < pt->examples.size(); i++) {
            for (int j = 0; j < pt->numNeurons(0); j++) {
                examples[idx++] = pt->examples[i].input[j];
            }
            for (int j = 0; j < pt->numNeurons(pt->nLayers - 1); j++) {
                examples[idx++] = pt->examples[i].Doutput[j];
            }
        }
    }
////////////////////////////////////////////////////////////////CUDA Kernel functions/////////////////////////////////////////////////
//esegue le operazioni di allocamento memoria e preparazione al lancio del kernel di propagazione della rete
hipError_t hostCUDAtrainingNet(float eps, int Niter, int ThxBlock) {
cout << "learning is started!" << endl;
//host variables
float *Cweights = &weights[0];
int *CArcIn = &ArcIn[0];
int *CArcOut = &ArcOut[0];
float *CNeuronOut = &NeuronOut[0];
float *CBayes = &Bayes[0];
float *CBPerr = &BPerr[0];
float *CmapMaxOut = &mapMaxOut[0];
float *CmapMinOut = &mapMinOut[0];
float *Cexamples = &examples[0];
int *CNeurInLyr = &NeurInLyr[0];
int *Cpriority = &priority[0];
float *CMeanErr = &MeanErr;
//device variables
float *dev_weights = 0;
int *dev_ArcIn = 0;
int *dev_ArcOut = 0;
float *dev_examples = 0;
float *dev_NeuronOut = 0;
float *dev_Bayes = 0;
float *dev_BPerr = 0;
float *dev_mapMaxOut = 0;
float *dev_mapMinOut = 0;
int *dev_NeurInLyr = 0;
int *dev_priority = 0;
float *dev_MeanErr = 0;
//int ThxBlock = 1024;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(GpuID);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
// Allocate GPU buffers for vectors
cudaStatus = hipMalloc((void**)&dev_weights, weights.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_ArcIn, ArcIn.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_ArcOut, ArcOut.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_NeuronOut, NeuronOut.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_Bayes, Bayes.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_BPerr, BPerr.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_mapMaxOut, mapMaxOut.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_mapMinOut, mapMinOut.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_examples, examples.size() * sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_NeurInLyr, NeurInLyr.size() * sizeof(int));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_priority, priority.size() * sizeof(int));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMalloc((void**)&dev_MeanErr, sizeof(float));
if (cudaCheckStatus(cudaStatus) == true) goto Error;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_weights, Cweights, weights.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_ArcIn, CArcIn, ArcIn.size() * sizeof(int), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_ArcOut, CArcOut, ArcOut.size() * sizeof(int), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_NeuronOut, CNeuronOut, NeuronOut.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_Bayes, CBayes, Bayes.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_BPerr, CBPerr, BPerr.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_mapMaxOut, CmapMaxOut, mapMaxOut.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_mapMinOut, CmapMinOut, mapMinOut.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_examples, Cexamples, examples.size() * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_NeurInLyr, CNeurInLyr, NeurInLyr.size() * sizeof(int), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_priority, Cpriority, priority.size() * sizeof(int), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(dev_MeanErr, CMeanErr, sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
//////////////////lancio dei kernel all'interno della gpu////////////////
int startA = 0;
int endA = 0;
int startN = 0;
int endN = 0;
int numLayerArcs = 0;
int numLayerNeur = 0;
int numOfBlocksMax = 0;
int numOfBlocksA = 0;
int numOfBlocksN = 0;
int numOfBlocksOut = floorf(outputN / ThxBlock) + 1;
int exampleRef = 0;
int outputRef = NeuronOut.size() - outputN;
long long t0 = 0, t1 = 0;
long long t0in = 0, t1in = 0;
double elapsedMilliseconds = 0;
double elapsedInMilliseconds = 0;
for (int it = 0; it < Niter; it++) { //scorro le iterazioni
t0 = PerformanceCounter();
for (int t = 0; t < (examples.size() / (inputN + outputN)); t++) { //scorro gli esempi
//imposto il riferimento per l'esempio di input
exampleRef = t * (inputN + outputN);
t0in = PerformanceCounter();
//resetto il vettore contenente lo stato di attivazione dei neuroni
numOfBlocksA = (floorf(NeuronOut.size() / ThxBlock) + 1);
CUDAresetVector << <numOfBlocksA, ThxBlock >> > (dev_NeuronOut, NeuronOut.size());
//imposto i valori di input ai neuroni dello strato input
numOfBlocksA = (floorf(inputN / ThxBlock) + 1);
CUDAsetInput << <numOfBlocksA, ThxBlock >> > (dev_NeuronOut, inputN, exampleRef, dev_examples);
//propagazione dell'input nella rete
startA = 0; // indice di partenza dei vettori archi
endA = 0; // ultimo indice dei vettori archi
startN = 0; // indice di partenza dei vettori neuroni
endN = 0; // ultimo indice dei vettori neuroni
for (int i = 0; i < priority.size() - 1; i++) { //NB non viene applicata la sigmoide allo strato di input eventulmente correggi
startA = priority[i] + 1;
endA = priority[i + 1];
if (i < priority.size() - 2) {
startN = NeurInLyr[i + 1] + 1;
endN = NeurInLyr[i + 2];
}
numLayerArcs = endA - startA + 1;
numLayerNeur = endN - startN + 1;
numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
if (i < priority.size() - 2) {
CUDAlayerInput << <numOfBlocksA, ThxBlock >> > (dev_weights, dev_ArcIn, dev_ArcOut, dev_NeuronOut, startA, endA); //propago l'output dei neuroni al prossimo/i layer
CUDAbayesInput << < numOfBlocksN, ThxBlock >> > (dev_NeuronOut, dev_Bayes, startN, endN); //applico il contributo dei bayes all output dei neuroni del layer corrente
CUDAsigLayer << <numOfBlocksN, ThxBlock >> > (dev_NeuronOut, startN, endN); //applico la sigmoide allo stato di attivazione dei neuroni
}
}
t1in = PerformanceCounter();
elapsedInMilliseconds += ((t1in - t0in) * 1000.0) / PerformanceFrequency();
//resetto il vettore contenente l'errore dei neuroni
numOfBlocksN = (floorf(BPerr.size() / ThxBlock) + 1);
CUDAresetVector << <numOfBlocksN, ThxBlock >> > (dev_BPerr, BPerr.size());
hipLaunchKernelGGL(( CUDAresetVar) , dim3(1), dim3(1) , 0, 0, dev_MeanErr);
CUDAoutputErr << <numOfBlocksOut, ThxBlock >> > (dev_NeuronOut, outputRef, NeuronOut.size(), inputN, dev_BPerr, dev_examples, exampleRef, dev_mapMaxOut, dev_mapMinOut, dev_MeanErr);
hipMemcpy(CMeanErr, dev_MeanErr, sizeof(float), hipMemcpyDeviceToHost);
////////////////////////////////////visualizzazione dell'esempio///////////////////////////////////
if (en == t) {
cudaStatus = hipMemcpy(CNeuronOut, dev_NeuronOut, NeuronOut.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
copy(CNeuronOut, CNeuronOut + NeuronOut.size(), NeuronOut.begin());
cout << "esempio " << en << endl;
for (int on = 0; on < outputN; on++) {
delta = mapMaxOut[on] - mapMinOut[on];
cout << "Y" << on << ": " << (NeuronOut[NeuronOut.size() - outputN + on] * delta) + mapMinOut[on] << " D" << on << ": " << examples[exampleRef + inputN + on] << endl;
}
cout << endl;
en--;
if (en < 0)en = (examples.size() / (inputN + outputN)) - 1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
MeanErr += *CMeanErr / outputN;
//retropropagazione dell'errore
for (int i = priority.size() - 2; i > 1; i--) {
startA = priority[i - 1] + 1;
endA = priority[i];
startN = NeurInLyr[i - 1] + 1;
endN = NeurInLyr[i];
numLayerArcs = endA - startA + 1;
numLayerNeur = endN - startN + 1;
numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
//numOfBlocksMax = maxOf(numOfBlocksA, numOfBlocksN);
hipLaunchKernelGGL(( CUDAPropagationErr) , dim3(numOfBlocksA), dim3(ThxBlock) , 0, 0, dev_BPerr, dev_weights, dev_NeuronOut, dev_ArcIn, dev_ArcOut, startA, endA);
hipLaunchKernelGGL(( CUDAoutDiff) , dim3(numOfBlocksN), dim3(ThxBlock) , 0, 0, dev_BPerr, dev_NeuronOut, startN, endN);
cudaStatus = hipMemcpy(CBPerr, dev_BPerr, BPerr.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
copy(CBPerr, CBPerr + BPerr.size(), BPerr.begin());
}
//applico a ogni peso la sua correzione
startN = NeurInLyr[1] + 1; // la correzione dei bais va applicata dal primo layer nascosto in poi
endN = NeurInLyr[NeurInLyr.size() - 1];
numLayerNeur = endN - startN + 1;
numOfBlocksA = floorf(weights.size() / ThxBlock) + 1;
numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
CUDAapplyWeightCorrections << <numOfBlocksA, ThxBlock >> > (eps, dev_NeuronOut, dev_BPerr, dev_weights, dev_ArcIn, dev_ArcOut, weights.size());
CUDAapplyBayesCorrections << <numOfBlocksN, ThxBlock >> > (eps, dev_BPerr, dev_Bayes, startN, endN);
}
t1 = PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency(); // calcolo il tempo di esecuzione di una iterazione di addestramento (tutto il set)
MeanErr = MeanErr / (examples.size() / (inputN + outputN)); //calcolo l'errore percentuale medio sul dataset
elapsedInMilliseconds = elapsedInMilliseconds / (examples.size() / (inputN + outputN));
cout << "Iterazione: " << it << " " << MeanErr << " %Err " << "execution time:" << elapsedMilliseconds << "ms" << endl;
cout << "mean InputTime: " << elapsedInMilliseconds << "ms" << endl;
printNetSpecs();
MeanErr = 0;
}
cudaStatus = hipMemcpy(Cweights, dev_weights, weights.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
copy(Cweights, Cweights + weights.size(), weights.begin());
cudaStatus = hipMemcpy(CBayes, dev_Bayes, Bayes.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
copy(CBayes, CBayes + Bayes.size(), Bayes.begin());
//checkpoint di errore (se la GPU richiama un qualunque errore ripare da qui)
Error:
//libero la memoria nella scheda grafica
hipFree(dev_weights);
hipFree(dev_ArcIn);
hipFree(dev_ArcOut);
hipFree(dev_NeuronOut);
hipFree(dev_examples);
hipFree(dev_BPerr);
hipFree(dev_mapMaxOut);
hipFree(dev_mapMinOut);
hipFree(dev_priority);
hipFree(dev_NeurInLyr);
//ritorno lo stato della GPU
return cudaStatus;
}
//uploads the network parameters (weights, topology, dataset and work buffers) into gpu memory
//returns the last CUDA status; on failure every gpu buffer is released before returning
hipError_t hostCUDAuploadNetParams() {
	hipError_t cudaStatus;
	//host-side raw pointers over the std::vectors, as required by hipMemcpy
	float *Cweights = &weights[0];
	int *CArcIn = &ArcIn[0];
	int *CArcOut = &ArcOut[0];
	float *CNeuronOut = &NeuronOut[0];
	float *CBayes = &Bayes[0];
	float *CBPerr = &BPerr[0];
	float *CmapMaxOut = &mapMaxOut[0];
	float *CmapMinOut = &mapMinOut[0];
	float *Cexamples = &examples[0];
	int *CNeurInLyr = &NeurInLyr[0];
	int *Cpriority = &priority[0];
	float *CMeanErr = &MeanErr;
	// Choose which GPU to run on, change this on a multi-GPU system.
	// (the duplicated hipSetDevice call that preceded the pointer initializations was
	// removed: it was redundant, and its goto jumped over initialized locals)
	cudaStatus = hipSetDevice(GpuID);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	// Allocate GPU buffers for the vectors
	cudaStatus = hipMalloc((void**)&gpuNetParams.weights, weights.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.ArcIn, ArcIn.size() * sizeof(int)); // was sizeof(float): same byte count, but int is the element type
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.ArcOut, ArcOut.size() * sizeof(int)); // was sizeof(float): same byte count, but int is the element type
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.NeuronOut, NeuronOut.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.Bayes, Bayes.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.BPerr, BPerr.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.mapMaxOut, mapMaxOut.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.mapMinOut, mapMinOut.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.examples, examples.size() * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.NeurInLyr, NeurInLyr.size() * sizeof(int));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.priority, priority.size() * sizeof(int));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.InputRT, inputN * sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMalloc((void**)&gpuNetParams.MeanErr, sizeof(float));
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = hipMemcpy(gpuNetParams.weights, Cweights, weights.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.ArcIn, CArcIn, ArcIn.size() * sizeof(int), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.ArcOut, CArcOut, ArcOut.size() * sizeof(int), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.NeuronOut, CNeuronOut, NeuronOut.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.Bayes, CBayes, Bayes.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.BPerr, CBPerr, BPerr.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.mapMaxOut, CmapMaxOut, mapMaxOut.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.mapMinOut, CmapMinOut, mapMinOut.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.examples, Cexamples, examples.size() * sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.NeurInLyr, CNeurInLyr, NeurInLyr.size() * sizeof(int), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.priority, Cpriority, priority.size() * sizeof(int), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	cudaStatus = hipMemcpy(gpuNetParams.MeanErr, CMeanErr, sizeof(float), hipMemcpyHostToDevice);
	if (cudaCheckStatus(cudaStatus) == true) goto Error;
	//error checkpoint: reached only via goto when a CUDA call fails
	if (false) {
	Error:
		//free every gpu buffer (BUG FIX: Bayes, InputRT and MeanErr were previously leaked on failure)
		hipFree(gpuNetParams.weights);
		hipFree(gpuNetParams.ArcIn);
		hipFree(gpuNetParams.ArcOut);
		hipFree(gpuNetParams.NeuronOut);
		hipFree(gpuNetParams.Bayes);
		hipFree(gpuNetParams.examples);
		hipFree(gpuNetParams.BPerr);
		hipFree(gpuNetParams.mapMaxOut);
		hipFree(gpuNetParams.mapMinOut);
		hipFree(gpuNetParams.priority);
		hipFree(gpuNetParams.NeurInLyr);
		hipFree(gpuNetParams.InputRT);
		hipFree(gpuNetParams.MeanErr);
		cout << "ERRORE: libero la memoria della gpu. " << endl;
	}
	return cudaStatus;
}
//downloads the trained network parameters (weights and biases) from the gpu into host memory
hipError_t hostCUDAdownloadNetParams() {
cout << "downloading net params from gpu.." << endl;
//host-side raw pointers over the destination vectors (filled in place by hipMemcpy)
float *Cweights = &weights[0];
float *CBayes = &Bayes[0];
hipError_t cudaStatus;
cudaStatus = hipSetDevice(GpuID);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(Cweights, gpuNetParams.weights, weights.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = hipMemcpy(CBayes, gpuNetParams.Bayes, Bayes.size() * sizeof(float), hipMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
//error checkpoint: reached only via goto when a CUDA call fails
if (false) {
Error:
//free the gpu memory (NOTE(review): the Bayes, InputRT and MeanErr buffers are not freed here — confirm whether this leak is intended)
hipFree(gpuNetParams.weights);
hipFree(gpuNetParams.ArcIn);
hipFree(gpuNetParams.ArcOut);
hipFree(gpuNetParams.NeuronOut);
hipFree(gpuNetParams.examples);
hipFree(gpuNetParams.BPerr);
hipFree(gpuNetParams.mapMaxOut);
hipFree(gpuNetParams.mapMinOut);
hipFree(gpuNetParams.priority);
hipFree(gpuNetParams.NeurInLyr);
cout << "ERRORE: libero la memoria della gpu. " << endl;
}
//return the last CUDA status to the caller
return cudaStatus;
}
//runs a forward pass of the already-trained network on the given example and prints the remapped outputs
//input must have the same size as the network input layer (inputN floats)
hipError_t hostCUDAInputNet(float *input, int ThxBlock) {
//important: verify that the input has the same size as the network input
hipError_t cudaStatus;
cudaStatus = hipSetDevice(GpuID);
if (cudaCheckStatus(cudaStatus) == true) goto Error; //NOTE(review): this goto jumps over the initialized locals below — some compilers reject this
//////////////////kernel launches inside the gpu////////////////
//int ThxBlock = 1024;
int startA = 0;
int endA = 0;
int startN = 0;
int endN = 0;
int numLayerArcs = 0;
int numLayerNeur = 0;
int numOfBlocksMax = 0; // unused in this function
int numOfBlocksA = 0;
int numOfBlocksN = 0;
int numOfBlocksOut = floorf(outputN / ThxBlock) + 1;
int outputRef = NeuronOut.size() - outputN; // index of the first output neuron (outputs sit at the tail of NeuronOut)
long long t0in = 0, t1in = 0;
double elapsedInMilliseconds = 0;
t0in = PerformanceCounter();
//reset the vector holding the neurons' activation state
numOfBlocksA = (floorf(NeuronOut.size() / ThxBlock) + 1);
hipLaunchKernelGGL(( CUDAresetVector) , dim3(numOfBlocksA), dim3(ThxBlock) , 0, 0, gpuNetParams.NeuronOut, NeuronOut.size());
cudaStatus = hipMemcpy(gpuNetParams.InputRT, input, inputN * sizeof(float), hipMemcpyHostToDevice);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
//set the input values on the input-layer neurons
numOfBlocksA = (floorf(inputN / ThxBlock) + 1);
hipLaunchKernelGGL(( CUDAsetSingleInput) , dim3(numOfBlocksA), dim3(ThxBlock), 0, 0, gpuNetParams.NeuronOut, inputN, gpuNetParams.InputRT);
//forward propagation of the input through the net
startA = 0; // start index into the arc vectors
endA = 0; // last index of the arc vectors
startN = 0; // start index into the neuron vectors
endN = 0; // last index of the neuron vectors
for (int i = 0; i < priority.size() - 1; i++) { //NB: the sigmoid is not applied to the input layer; fix if needed
startA = priority[i] + 1;
endA = priority[i + 1];
if (i < priority.size() - 2) {
startN = NeurInLyr[i + 1] + 1;
endN = NeurInLyr[i + 2];
}
numLayerArcs = endA - startA + 1;
numLayerNeur = endN - startN + 1;
numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
if (i < priority.size() - 2) {
hipLaunchKernelGGL(( CUDAlayerInput) , dim3(numOfBlocksA), dim3(ThxBlock) , 0, 0, gpuNetParams.weights, gpuNetParams.ArcIn, gpuNetParams.ArcOut, gpuNetParams.NeuronOut, startA, endA); //propagate the neuron outputs to the next layer(s)
hipLaunchKernelGGL(( CUDAbayesInput) , dim3(numOfBlocksN), dim3(ThxBlock) , 0, 0, gpuNetParams.NeuronOut, gpuNetParams.Bayes, startN, endN); //add the bias contribution to the current layer's neuron outputs
hipLaunchKernelGGL(( CUDAsigLayer) , dim3(numOfBlocksN), dim3(ThxBlock) , 0, 0, gpuNetParams.NeuronOut, startN, endN); //apply the sigmoid to the neurons' activation state
}
}
//copy the output-layer neuron activations back to cpu memory
cudaStatus = hipMemcpy(&NeuronOut[0] + outputRef, gpuNetParams.NeuronOut + outputRef, outputN * sizeof(float), hipMemcpyDeviceToHost); //TODO: reported to fail and not load the vector — find the BUG
if (cudaCheckStatus(cudaStatus) == true) goto Error;
t1in = PerformanceCounter();
elapsedInMilliseconds = ((t1in - t0in) * 1000.0) / PerformanceFrequency();
////////////////////////////////////display of the example output///////////////////////////////////
float delta;
cout << "input time: " << elapsedInMilliseconds << " ms" << endl;
for (int on = 0; on < outputN; on++) {
delta = mapMaxOut[on] - mapMinOut[on]; //remap the normalized output back to its original value range
cout << "Y" << on << ": " << (NeuronOut[NeuronOut.size() - outputN + on] * delta) + mapMinOut[on] << endl;
}
cout << endl;
///////////////////////////////////////////////////////////////////////////////////////////////////
//error checkpoint: reached only via goto when a CUDA call fails
if (false) {
Error:
//free the gpu memory
hipFree(gpuNetParams.weights);
hipFree(gpuNetParams.ArcIn);
hipFree(gpuNetParams.ArcOut);
hipFree(gpuNetParams.NeuronOut);
hipFree(gpuNetParams.examples);
hipFree(gpuNetParams.BPerr);
hipFree(gpuNetParams.mapMaxOut);
hipFree(gpuNetParams.mapMinOut);
hipFree(gpuNetParams.priority);
hipFree(gpuNetParams.NeurInLyr);
}
return cudaStatus;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////CUDA UTILITY//////////////////////////////////////////////////////////////////
//validates the result of a CUDA runtime call; in debug builds a failure is
//printed to stderr and trapped by assert, and the original status is always returned
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (hipSuccess != result) {
		fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
		assert(hipSuccess == result);
	}
#endif
	return result;
}
//checks a CUDA status code; returns true when an error occurred (so the caller can jump to its cleanup), false on success
bool cudaCheckStatus(hipError_t cudaStatus) {
	if (cudaStatus != hipSuccess) {
		//report the actual failure instead of the old fixed (and misleading) "hipSetDevice failed" text
		fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaStatus));
		return true;
	}
	//BUG FIX: the original had no return here, falling off the end of a non-void function (undefined behavior)
	return false;
}
//prints the main properties of the selected device (reads the file-level `prop` device-properties struct)
void printDeviceSpecs() {
printf("\nDevice: %s\n", prop.name);
printf("Cores clock: %d MHz\n", (prop.clockRate / 1000)); // clockRate is in kHz
printf("Memory clock: %d MHz\n", (prop.memoryClockRate / 1000));
printf("Total global memmory %.2f MB\n", (float)(prop.totalGlobalMem / (1024 * 1024))); // integer division first, then cast
printf("Max grid size: x %d, y %d, z %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Max block axis size: x %d, y %d, z %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Warp size: %d\n", prop.warpSize);
printf("Max therads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max therads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
printf("Compute Mode: %d\n", prop.computeMode);
printf("Host mem access: %d\n", prop.canUseHostPointerForRegisteredMem);
printf("Shared mem per multiprocessor %.2f KB\n", (float)(prop.sharedMemPerMultiprocessor / 1024));
printf("Max shared mem per blocco %.2f KB\n", (float)(prop.sharedMemPerBlock / 1024));
}
//prints the network parameters that are passed to the gpu: model size, neuron/arc totals and example count
void printNetSpecs() {
cout << "dimensione del modello: " << sizeOfModel("MB") << " MB" << endl;
cout << "numero totale dei neuroni : " << NeuronOut.size() << "(" << sizeOfVector(NeuronOut, "KB") + sizeOfVector(Bayes, "KB") + sizeOfVector(BPerr, "KB")<< " KB)" << endl;
cout << "numero totale degli archi : " << weights.size() << "(" << sizeOfVector(weights, "MB") + sizeOfVector(ArcIn, "MB") + sizeOfVector(ArcOut, "MB") << " MB)" << endl;
cout << "numero esempi : " << examples.size() / (inputN + outputN) << " (" << sizeOfVector(examples, "MB") << " MB)" << endl;
}
//computes the memory footprint of the model in the requested unit ("B", "KB", "MB" or "GB");
//returns 0 when the unit is unknown
float sizeOfModel(string mesureUnit = "B") {
	long long size = 0; // long long: the old int accumulator could overflow for models over 2 GB
	long long scale = 0;
	size += sizeof(weights[0])*weights.size(); //weights vector
	size += sizeof(ArcIn[0])*ArcIn.size(); //vector of arc target indices
	size += sizeof(ArcOut[0])*ArcOut.size(); //vector of arc source indices
	size += sizeof(NeuronOut[0])*NeuronOut.size(); //vector of neuron outputs
	size += sizeof(Bayes[0])*Bayes.size(); //vector of biases
	size += sizeof(BPerr[0])*BPerr.size(); //vector of back-propagated errors
	size += sizeof(priority[0])*priority.size(); //priority vector
	size += sizeof(examples[0])*examples.size(); //examples vector
	if (mesureUnit == "B") { scale = 1; } //Byte
	else if (mesureUnit == "KB") { scale = 1024; } //Kilobyte
	else if (mesureUnit == "MB") { scale = 1024 * 1024; } // Megabyte
	else if (mesureUnit == "GB") { scale = 1024 * 1024 * 1024; } //Gigabyte
	else { cout << "L'unit di misura non corretta!!" << endl; return 0.0f; }
	//BUG FIX: use floating-point division; the original truncated size/scale to an integer,
	//reporting e.g. 0 MB for any model smaller than 1 MB — now consistent with sizeOfVector
	return (float)((double)size / (double)scale);
}
//returns the memory footprint of a vector expressed in the requested unit
//("B", "KB", "MB" or "GB"); returns 0 when the unit is unknown
template<typename T, typename A>
float sizeOfVector(vector<T, A> const& vect, string mesureUnit = "B") {
	const int size = (int)(sizeof(vect[0]) * vect.size()); //raw footprint in bytes
	int scale;
	if (mesureUnit == "B") scale = 1;                      //Byte
	else if (mesureUnit == "KB") scale = 1024;             //Kilobyte
	else if (mesureUnit == "MB") scale = 1024 * 1024;      //Megabyte
	else if (mesureUnit == "GB") scale = 1024 * 1024 * 1024; //Gigabyte
	else { cout << "L'unit di misura non corretta!!" << endl; return 0.0f; }
	return (float)((float)size / (float)scale);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////ALTRE FUNZIONI////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////WINDOWS HIGH SPEED TIMING//////////////////////////////////////////////////////
BOOL WINAPI QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
BOOL WINAPI QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
//returns the current value of the Windows high-resolution performance counter, in ticks
inline long long PerformanceCounter() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceCounter(&li);
return li.QuadPart;
}
//returns the frequency (ticks per second) of the Windows high-resolution performance counter
inline long long PerformanceFrequency() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceFrequency(&li);
return li.QuadPart;
}
/* HOW TO USE:
long long t0 = PerformanceCounter();
//code to bench..
long long t1 = PerformanceCounter();
double elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
//container for populations of networks (presumably a genetic-evolution driver — currently a stub with no public interface)
class genEvolve {
private:
vector<MLP> mlps; // vector of MLP networks
vector<Hopfield> hpds; // vector of Hopfield networks
public:
};
/*
EXAMPLE:
CUDAcore gtx760(0); //create connection with gpu
gtx760.printDeviceSpecs(); //print specs of gpu
int layer = 10;
int columns = 50;
int out = 4;
int in = 9;
MLP a("cudaTest"); //create mlp object
a.qubeNet(layer, columns, in, out, true, 0.0001f);
a.setNetMap(800, 0);
a.getDataset("datnc"); //loading dataset from file
a.datasetOffset(200.0f); //adding an offset to the dataset
gtx760.cudaNetCopyMLP(&a); //prepare structure of net to be uploaded to gpu memory
gtx760.cudaNetCopyExamples(&a); //prepare dataset to be uploaded to gpu memory
gtx760.hostCUDAtrainingNet(10.0e-10f, 200, 256); //upload needed structures into gpu and start accelerated training
gtx760.cudaNetPasteMLP(&a); //copy trained model into cpu memory
a.saveNet("datncNet-l(5)-c(60)-FC"); //saving model
a.BP(5, 10.0e-10f, 0, 1); //continue to train net into cpu
a.saveNet("CpuTest5"); //new model saving
*/
| ca3e349d39fa4810cbadf76e206ac1203716f467.cu | //////////////CUDA INCLUDES///////////////
#include "cuda.h"
#include "cuda_runtime.h"
#include <device_functions.h>
#include "device_launch_parameters.h"
//////////////////////////////////////////
#include "stdafx.h"
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <string>
#include <windows.h>
#include <limits>
#include <math.h>
#include <vector>
#include <list>
#include <time.h>
#include <algorithm>
#include <assert.h>
#undef max
#undef min
#define M_PI 3.14159265358979323846 /* pi */
#define maxOf(a, b) (((a) > (b)) ? a : b)
using namespace std;
struct Neuron;
typedef Neuron *ptNeuron;
//weighted connection leaving a neuron
struct arc {
ptNeuron target = nullptr; // destination neuron of the connection
float weight = 0; // connection weight
float oldDelta = 0; // previous weight update (presumably kept for momentum — TODO confirm)
//bool enabled = true;
};
//connection described by both of its endpoints
struct interArc {
ptNeuron target = nullptr; // destination neuron
ptNeuron base = nullptr; // source neuron
};
//single neuron: activation state, bias, outgoing arcs and back-propagation bookkeeping
struct Neuron {
vector<arc> OutArcs; // outgoing arcs; to initialize: = new vector<arc>(10);
u_int numOutArcs = 0; // number of outgoing arcs
u_int numInArcs = 0; // number of incoming arcs
u_int layer = 0; // row (layer) index of the neuron
u_int column = 0; // column index of the neuron
float bayes = 0.01f; // bias weight
float oldBayesDelta = 0; // last variation of the bias weight
//vector<float> timeBayes; // vector of temporal interconnections
vector<float> influenceInput; // percentage of influence of each input
vector<float> influenceOutput; // percentage of influence of the error back-propagated from each output
float output = 0; // current potential (activation) of the neuron
float absOutSum = 0; // absolute-value sum of the neuron inputs
float absDeltaSum = 0; // absolute-value sum of the weight variations
float BPerr = 0; // back-propagation error
int neurIdx = 0; // every neuron is identified by a unique index referring to its position
};
//one layer of the network
struct Layer {
vector<Neuron> Neurons; // neurons belonging to this layer
u_int numNeurons = 0; // number of neurons in the layer
};
//value range used to remap a network output back to its original scale
struct Omap {
float maxValue = 1; // upper bound of the mapped output
float minValue = 0; // lower bound of the mapped output
};
struct conMap { // structure needed for the insertion of a new neuron
u_int startLyr; // layer of the source neuron
u_int startCol; // column of the source neuron
u_int arcRef; // index of the referenced arc (TODO confirm exact meaning)
u_int targetLyr; // layer of the target neuron
u_int targetCol; // column of the target neuron
};
//one time series (a single row of a CSV file)
struct timeSeries {
list<float> evento; // sequence of recorded values for this series
};
//one training example: input vector and desired output vector
struct example {
vector<float> input; // network input values
vector<float> Doutput; // desired (target) output values
};
//dataset split into training and validation sets, with their last measured errors
struct Dataset {
vector<example> trainingSet; // examples used for training
vector<example> validationSet; // examples used for validation
float triningErr = 0; // last training error (field name has a typo, kept for compatibility)
float validationErr = 0; // last validation error
};
//loads CSV time series and turns them into sliding-window training/validation datasets
class DatasetCore {
public:
list<Dataset> Datasets; // all datasets built so far
DatasetCore() {
}
////////////////////////////////////////////////DATASET MANIPULATION//////////////////////////////////////
//builds a dataset from a CSV time-series file by sliding a window over the series
//outStep - number of values per output example
//inEx - number of previous output examples passed in each input example
//trainingPerc - percentage of the dataset to use as training set
void readTimeSeriesCsv(string filename, int outStep, int inEx, float trainingPerc) {
cout << "caricamento del file " << filename << endl;
ifstream file(filename + ".csv");
stringstream stream;
stream << file.rdbuf();
string line, cell;
int col = 0, row = -1;
int fileRow = coutnRow(filename);
list<timeSeries> esempi(fileRow);
list<timeSeries>::iterator it = esempi.begin();
while (getline(stream, line, '\n')) { //split the file into rows
stringstream streamline(line);
col = 0; row++;
it->evento.resize(0);
while (getline(streamline, cell, ';')) { //split each row into ';'-separated cells
//cout << stof(cell) << endl;
it->evento.push_back(stof(cell));
}
it++;
}
Dataset newSet;
example newEx;
newEx.Doutput.resize(outStep);
newEx.input.resize(inEx*outStep);
int nEx = esempi.size() - inEx;
int ntrainEx = (int)((trainingPerc / 100) * nEx);
int nvalidateEx = nEx - ntrainEx;
int sPos = 0, ePos = inEx, pos = 0; // sliding-window bounds; the step is outStep
int inId = 0, outId = 0;
for (int i = 0; i < nEx; i++) {
pos = 0, inId = 0, outId = 0;
for (list<timeSeries>::iterator p = esempi.begin(); p != esempi.end(); p++) {
for (list<float>::iterator q = p->evento.begin(); q != p->evento.end(); q++) {
if (pos >= sPos && pos <= ePos) {
if (pos >= inEx + sPos) { // fill the output vector
newEx.Doutput[outId] = *q;
outId++;
}
else { //fill the input vector
newEx.input[inId] = *q;
inId++;
}
}
}
pos++; //NOTE(review): pos advances once per series (row), so the window selects whole rows — confirm intended
}
if (sPos < ntrainEx) { // append to the training set
newSet.trainingSet.push_back(newEx);
}
else { //append to the validation set
newSet.validationSet.push_back(newEx);
}
//cout << sPos << endl;
sPos++;
ePos++;
}
Datasets.push_back(newSet);
}
//returns a copy of the n-th dataset's training set (training == true) or validation set
vector<example> getDataset(int n, bool training = true) {
list<Dataset>::iterator p = Datasets.begin();
for (int k = 0; k < n; k++) p++;
if (training == true) {
return p->trainingSet;
}
else {
return p->validationSet;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////OTHER FUNCTIONS//////////////////////////////////////////////
//counts the number of rows in <filename>.csv (NOTE: the name contains a typo, kept for compatibility)
int coutnRow(string filename) {
ifstream file(filename + ".csv");
stringstream stream;
stream << file.rdbuf();
string line;
int row = 0;
while (getline(stream, line, '\n'))row++;
return row;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class Network {
public:
vector<Layer> Layers; // vettore di struct layer
vector<example> examples; // vettore di esempi per l'apprendimento
vector<Omap> map; // vettore contenente i valori di rimappatura del'output della rete
string genoma = ""; // nome del file
u_int nLayers = 0; // Layer nella rete compresi input output
u_int nNeurons = 0; // numero totale neuroni nella rete
u_int nArc = 0; //numero totale di tutti gli archi presenti nella rete
Network(string filename) {
genoma = filename;
}
////////////////////////////////////FUNZIONE COSTRUZIONE RETE DA FILE/////////////////////////////////////////////////////////
void getNetParams() {
ifstream file(genoma + ".txt");
string line, segment, pice;
int flag = 4; // 3- NL, 2-Ln, 1-NCON, 0-CON
int pos;
int lyr, neuronsInLyr, Nneuron, con; // numero layer, numero neuroni in dato layer, numero connessioni d'uscita in un dato neurone
int Tlyr, Tneuron; // target layer, target neuron
int conFlag; // 0 - inserisco la connessione, 1 - inserisco il peso
float bayes;
//file.open(filename + ".txt");
stringstream stream;
stream << file.rdbuf();
if (file.is_open()) {
//cout << "si e' apertpo" << endl;
while (getline(stream, line, '\n')) { //smista righe
stringstream streamline(line);
pos = 0;
while (getline(streamline, segment, ' ')) { // smista segmenti riga
//cout << segment << endl;
if (flag != 0 && pos == 0) {
if (segment == "CON") {
flag = 0;
}
else if (segment == "NCON") {
flag = 1;
}
else if (segment == "L0") {
flag = 2;
}
else if (segment == "NL") {
flag = 3;
}
}
if (flag == 0 && segment != "CON") { // dichiarazione delle connessioni
//cout << "flag0" << endl;
if (pos == 0) {
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax CON \n 0-0
lyr = stoi(pice); // rimane fissato per tutta la riga
getline(streamsegment, pice);
Nneuron = stoi(pice);
conFlag = 0;
}
else if (lyr != nLayers) {
if (conFlag == 0) { //inizializzo la connessione
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax CON \n 1-0
Tlyr = stoi(pice);
getline(streamsegment, pice, ' ');
Tneuron = stoi(pice);
Layers[lyr].Neurons[Nneuron].OutArcs[(pos - 1) / 2].target = &(Layers[Tlyr].Neurons[Tneuron]);
Layers[Tlyr].Neurons[Tneuron].numInArcs++;
conFlag = 1;
}
else { //inizzializzo il peso della connessione
Layers[lyr].Neurons[Nneuron].OutArcs[(pos - 2) / 2].weight = stof(segment);
conFlag = 0;
}
}
}
else if (flag == 1 && segment != "NCON") { // dichiarazione numero connessioni
//cout << "flag1" << endl;
if (pos == 0) {
stringstream streamsegment(segment);
getline(streamsegment, pice, '-'); // file syntax NCON \n 0-0_4
lyr = stoi(pice);
getline(streamsegment, pice, '_');
Nneuron = stoi(pice);
getline(streamsegment, pice, ':');
con = stoi(pice);
getline(streamsegment, pice, ' ');
bayes = stof(pice);
if (con >= 0) {
Layers[lyr].Neurons[Nneuron].numOutArcs = con;
nArc += con;
Layers[lyr].Neurons[Nneuron].OutArcs.resize(con, arc()); //= new vector<arc>(con); // dichiarazione numero connessioni del neurone
Layers[lyr].Neurons[Nneuron].layer = lyr;
Layers[lyr].Neurons[Nneuron].column = Nneuron;
Layers[lyr].Neurons[Nneuron].bayes = bayes;
}
}
}
else if (flag == 2) { // numero neuroni per layer
//cout << "flag2" << endl;
if (pos == 0) {
lyr = stoi(segment.erase(0, 1));
}
else {
neuronsInLyr = stoi(segment);
Layers[lyr].numNeurons = neuronsInLyr;
nNeurons += neuronsInLyr;
Layers[lyr].Neurons.resize(neuronsInLyr, Neuron()); // = new vector<Neuron>(neuronsInLyr); // dichiarazione numero di neuroni per layer
//cout << segment << " " << lyr << endl;
}
}
else if (flag == 3 && pos != 0) { // numero layer
//cout << "flag3" << endl;
Layers.resize(stoi(segment), Layer()); //= new vector<Layer>(stoi(segment)); // dichiarazione numero di layer
nLayers = stoi(segment);
//cout << segment << endl;
}
pos++;
}
}
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
Layers[nLayers - 1].Neurons[i].column = i;
Layers[nLayers - 1].Neurons[i].layer = nLayers - 1;
}
}
else {
cout << genoma + ": errore nell'apertura o file non trovato" << endl;
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////FUNZIONE COSTRUZIONE DATASET DA FILE//////////////////////////////////////////////////////
// Loads a training dataset from "<filename>.txt".
// Expected layout: "NE <n>" (example count), "IN <n>" (inputs per example),
// "OUT <n>" (outputs per example), then an "ES" line followed by alternating
// lines of space-separated input values and desired-output values.
// The IN/OUT counts must match the network's first/last layer sizes,
// otherwise the dataset is cleared and loading aborts.
void getDataset(string filename) {
ifstream file(filename + ".txt");
string line, segment, pice;
int flag = 4; // section marker: 3 = NE, 2 = IN, 1 = OUT, 0 = ES
int pos, esPos;
int input, output, es; // declared sizes read from the header lines
// NOTE(review): input/output stay uninitialized if the IN/OUT lines are
// missing before "ES" -- a well-formed file is assumed; confirm upstream.
int IOFlag = 0; // 0 - next line holds an example's inputs, 1 - its outputs
//file.open(filename + ".txt");
stringstream stream;
stream << file.rdbuf(); // slurp the whole file into an in-memory stream
if (file.is_open()) {
//cout << "si e' apertpo" << endl;
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
pos = 0;
while (getline(streamline, segment, ' ')) { // dispatch row segments
//cout << segment << endl;
if (flag != 0 && pos == 0) {
if (segment == "ES") {
flag = 0;
}
else if (segment == "OUT") {
flag = 1;
}
else if (segment == "IN") {
flag = 2;
}
else if (segment == "NE") {
flag = 3;
}
}
if (flag == 0 && segment == "ES") { // start of the examples section
//cout << "flag0" << endl;
goto examplesLoading;
}
else if (flag == 1 && pos != 0) { // number of outputs
//cout << "flag1" << endl;
output = stoi(segment);
if (output != numNeurons(nLayers - 1)) { cout << "Il dataset deve avere lo stesso numero di output della rete!!" << endl; ClearDataset(); return; }
}
else if (flag == 2 && pos != 0) { // number of inputs
//cout << "flag2" << endl;
input = stoi(segment);
if (input != numNeurons(0)) { cout << "Il dataset deve avere lo stesso numero di input della rete!!" << endl; ClearDataset(); return; }
}
else if (flag == 3 && pos != 0) { // number of examples
//cout << "flag3" << endl;
examples.resize(stoi(segment), example()); // declare the number of examples
//cout << segment << endl;
}
pos++;
}
}
examplesLoading: // jumped to when "ES" is met; reads the remaining rows as examples
esPos = 0;
while (getline(stream, line, '\n')) { // dispatch rows
stringstream streamline(line);
pos = 0;
if (!IOFlag) { // load the input values of example esPos
examples[esPos].input.resize(input);
while (getline(streamline, segment, ' ')) { // dispatch row segments
examples[esPos].input[pos] = stof(segment);
pos++;
}
IOFlag = 1;
}
else { // load the desired-output values of example esPos
examples[esPos].Doutput.resize(output);
while (getline(streamline, segment, ' ')) { // dispatch row segments
examples[esPos].Doutput[pos] = stof(segment);
pos++;
}
IOFlag = 0;
esPos++;
}
}
}
else {
cout << genoma + ": errore nell'apertura o file non trovato" << endl;
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////SALVA RETE SU FILE///////////////////////////////////////////////////////////////
// Serializes the network to "<filename>.txt" (or "<genoma>.txt" when the
// filename is empty) in the loader's format: "NL <layers>", one
// "L<i> <neurons>" line per layer, an "NCON" section with
// "<layer>-<col>_<outArcs>:<bias>" per neuron, then a "CON" section
// listing each neuron's outgoing connections as "<lyr>-<col> <weight>".
void saveNet(string filename = "") {
cout << "saving net.. " << endl;
ofstream file;
if (filename != "") file.open(filename + ".txt");
else file.open(genoma + ".txt");
// header: layer count, then one line per layer with its neuron count
file << "NL " << nLayers << '\n';
for (int lyr = 0; lyr < nLayers; lyr++) {
file << "L" << lyr << " " << Layers[lyr].numNeurons << '\n';
}
// per-neuron out-arc counts and biases
file << "NCON" << '\n';
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
auto &n = Layers[lyr].Neurons[col];
file << lyr << "-" << col << "_" << n.numOutArcs << ":" << n.bayes << '\n';
}
}
// connection list: one line per neuron, its targets and weights
file << "CON";
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
auto &n = Layers[lyr].Neurons[col];
file << '\n' << lyr << "-" << col;
for (int a = 0; a < n.numOutArcs; a++) {
file << " " << n.OutArcs[a].target->layer << "-" << n.OutArcs[a].target->column << " " << n.OutArcs[a].weight;
}
}
}
file.close();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////SALVA DATASET SU FILE///////////////////////////////////////////////////////////////
// Writes the loaded examples to "<filename>.txt" in the format understood
// by getDataset(): NE/IN/OUT header lines, then "ES" and, per example,
// one line of inputs followed by one line of desired outputs.
void saveDataset(string filename) {
ofstream file(filename + ".txt");
file << "NE " << examples.size() << '\n';
file << "IN " << Layers[0].numNeurons << '\n';
file << "OUT " << Layers[nLayers - 1].numNeurons << '\n';
file << "ES" << '\n';
for (int e = 0; e < examples.size(); e++) {
// input values, space separated, newline after the last one
for (int k = 0; k < examples[e].input.size(); k++) {
file << examples[e].input[k];
file << ((k == examples[e].input.size() - 1) ? '\n' : ' ');
}
// desired-output values, same layout
for (int k = 0; k < examples[e].Doutput.size(); k++) {
file << examples[e].Doutput[k];
file << ((k == examples[e].Doutput.size() - 1) ? '\n' : ' ');
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////// FUNZIONI MODIFICA RETE//////////////////////////////////////////////////////////
// Removes the arc (Nlayer-Ncolumn) -> (targetLayer-targetColumn), rolling
// back the bookkeeping tied to it (target's in-arc count and the influence
// histories). If the target neuron is left with no incoming arcs and is not
// an input-layer neuron, the whole target neuron is deleted instead.
// Prints an error if no such arc exists.
void deleteArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer == targetLayer && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column == targetColumn) {
// update the network parameters related to this arc
Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs--;
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.pop_back();
// BUGFIX: the guard used to test the *target*'s influenceOutput while
// popping from the *source* neuron's vector; test the source neuron,
// matching deleteArcByRef().
if (Layers[Nlayer].Neurons[Ncolumn].influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs == 0 && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer != 0) {
// orphaned non-input target: remove the neuron entirely
deleteNeuron(Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer, Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + i);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
return;
}
}
cout << "errore: arco (" << Nlayer << "-" << Ncolumn << ") -> (" << targetLayer << "-" << targetColumn << ") non trovato, eliminazione fallita" << endl;
}
// Deletes one arc given the source neuron's indices and a pointer to the
// connection itself. Rolls back the per-arc bookkeeping; if the target is
// left without incoming arcs the whole target neuron is removed.
// NOTE(review): unlike deleteArc(), there is no 'layer != 0' guard here, so
// an orphaned input-layer target would also be deleted -- confirm intended.
// NOTE(review): targetArc may dangle after the erase below; callers must
// not dereference it afterwards.
void deleteArcByRef(int Nlayer, int Ncolumn, arc *targetArc) { // delete an arc via source indices plus a pointer to the connection
// update the network parameters related to this arc
targetArc->target->numInArcs--;
if (targetArc->target->influenceInput.size() > 0)targetArc->target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (targetArc->target->numInArcs == 0) {
deleteNeuron(targetArc->target->layer, targetArc->target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + getOutConTargetID(getTarget(Nlayer, Ncolumn), targetArc->target));
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
}
// Removes neuron (Nlayer, Ncolumn) from the network: first deletes every
// arc pointing at it from earlier layers, then its own outgoing arcs,
// renumbers the following neurons in the layer, and finally erases it.
// NOTE(review): both loops call deleteArcByRef(), which erases elements of
// the very OutArcs vectors being iterated -- indices can skip entries and
// the &OutArcs[k] pointer may be invalidated mid-loop; confirm this is
// tolerated by the callers before restructuring.
void deleteNeuron(int Nlayer, int Ncolumn) {
// drop the arc from every neuron that points at the neuron to delete and
// decrement the out-arc count of those source neurons
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == Nlayer && Layers[i].Neurons[j].OutArcs[k].target->column == Ncolumn) {
deleteArcByRef(i, j, &Layers[i].Neurons[j].OutArcs[k]);
}
}
}
}
// delete the outgoing arcs of the neuron being removed
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
deleteArcByRef(Nlayer, Ncolumn, &Layers[Nlayer].Neurons[Ncolumn].OutArcs[i]);
}
// shift down the column index of the following neurons in the same layer
for (int i = Ncolumn + 1; i < Layers[Nlayer].numNeurons; i++) Layers[Nlayer].Neurons[i].column--;
// erase the neuron itself and update the counters
Layers[Nlayer].Neurons.erase(Layers[Nlayer].Neurons.begin() + Ncolumn);
Layers[Nlayer].numNeurons--;
nNeurons--;
}
void addArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
arc newArc; newArc.target = &(Layers[targetLayer].Neurons[targetColumn]); newArc.weight = 0.01f;
Layers[targetLayer].Neurons[targetColumn].numInArcs++;
Layers[Nlayer].Neurons[Ncolumn].OutArcs.push_back(newArc);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs++;
}
// Inserts a new neuron into layer Nlayer, wiring a fraction outConFill of
// the neurons in later layers as outputs and a fraction inConFill of the
// neurons in earlier layers as inputs, both chosen in random order.
// NOTE(review): if the randomized bookkeeping vectors disagree in size the
// while loops may spin without progress -- assumes casualVector() returns
// consistent permutations; confirm.
void addNeuron(int Nlayer, float inConFill, float outConFill) {
//init new neuron
Neuron newNeur;
newNeur.layer = Nlayer;
newNeur.column = Layers[Nlayer].numNeurons; // column start at 0 , numNeurons start at 1
int nBackNeurons = 0; for (int i = 0; i < Nlayer; i++) nBackNeurons += Layers[i].numNeurons;
int nFrontNeurons = 0; for (int i = Nlayer + 1; i < nLayers; i++) nFrontNeurons += Layers[i].numNeurons;
newNeur.numOutArcs = (int)(outConFill*nFrontNeurons);
newNeur.numInArcs = (int)(inConFill*nBackNeurons);
newNeur.OutArcs.resize(newNeur.numOutArcs);
// bind the neuron to neurons of the later layers (OUTPUT CONNECTIONS),
// visiting both the arc slots and the candidate targets in random order
int x, y = 0;
vector<u_int> OutArcAcces = casualVector(newNeur.numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
while (y < OutArcAcces.size()) {
x = 0;
for (int i = Nlayer + 1; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
if (y > OutArcAcces.size() - 1) continue;
if (NetAcces[y] == x) {
newNeur.OutArcs[OutArcAcces[y]].target = &(Layers[i].Neurons[j]);
newNeur.OutArcs[OutArcAcces[y]].weight = 0.01f;
y++;
}
x++;
}
}
}
cout << "connessioni output create" << endl << endl;
WeightsStamp("w");
// insert the neuron into the network.
// NOTE: push_back() may reallocate the vector, invalidating every pointer
// into this layer; the connections towards it are saved and re-resolved.
vector<conMap> connections = saveConsTowardsLyr(Nlayer); // save the connections towards the layer
Layers[Nlayer].Neurons.push_back(newNeur); // insert the new neuron (vector references may change)
Layers[Nlayer].numNeurons++;
loadConsTowardsLyr(connections); // re-resolve the pointers aimed at this layer's neurons
WeightsStamp("w");
cout << "neurone inserito" << endl << endl;
// bind the neuron to earlier layers (INPUT CONNECTIONS)
NetAcces = casualVector(nBackNeurons);
arc newArc; newArc.weight = 0.01f;
y = 0;
while (y < newNeur.numInArcs) {
x = 0;
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
if (y > newNeur.numInArcs - 1) continue;
if (NetAcces[y] == x) {
newArc.target = &(Layers[Nlayer].Neurons[Layers[Nlayer].numNeurons - 1]);
//Layers[Nlayer].Neurons[Layers[Nlayer].numNeurons - 1].numInArcs++;
Layers[i].Neurons[j].OutArcs.push_back(newArc);
Layers[i].Neurons[j].numOutArcs++;
cout << "BIND:" << y << " (" << i << "-" << j << ") -> (" << Nlayer << "-" << Layers[Nlayer].numNeurons - 1 << ") " << endl;
WeightsStamp("w");
y++;
}
x++;
}
}
}
nNeurons++;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////FUNZIONI VARIE///////////////////////////////////////////////////////////
// Derivative of the sigmoid expressed through the neuron's stored
// activation: y * (1 - y).
// (Remember to include k here if a sigmoid steepness factor is added.)
float DsigOut(int Layer, int Neuron) {
const float y = Layers[Layer].Neurons[Neuron].output;
return y * (1 - y);
}
// Logistic sigmoid: maps any real x into (0, 1).
float sigmoid(float x) {
const float e = exp(-x); // float overload, as in the original expression
return 1 / (1 + e);
}
// Inverse sigmoid (logit): log(x / (1 - x)); defined for x in (0, 1).
float logit(float x) {
const float odds = x / (1 - x); // float math, as in the original
return log(odds);
}
// Gaussian bell evaluated in double precision and truncated to float.
// NOTE(review): 'var' is used as sigma in the normalization but as sigma^2
// in the exponent -- kept exactly as the original; confirm intent.
float gaussian(float x, float mu, float var) {
const double norm = 1 / (var * sqrt(2 * M_PI));
const double arg = -(pow(x - mu, 2.0) / (2 * var));
return norm * exp(arg);
}
// Debug printer for the network's weights. Modes:
//  "m"  - per-layer mean of the outgoing weights
//  "a"  - every weight plus some learning parameters (output, BPerr, delta)
//  "w"  - every weight with its target's (layer, column) reference
//  "fc" - mean of the weight groups between each pair of layers
void WeightsStamp(string mode = "a") {
//m  - prints the mean of each layer's weights
//a  - prints every weight of the net with some learning parameters
//w  - prints every weight with the row-column reference to its target
//fc - prints the means of the weight groups between two layers
if (mode == "m") { // layer means
float mean;
int x;
for (int i = 0; i < numLayers() - 1; i++) {
x = 0;
mean = 0;
for (int c = 0; c < numNeurons(i); c++) {
for (int d = 0; d < numCon(i, c); d++) {
mean += getWeight(i, c, d);
x++;
}
}
// NOTE(review): division by zero if layer i has no outgoing arcs
mean /= x;
cout << "Layer " << i << " weights:" << mean << endl;
}
}
else if (mode == "a") { // print every weight and the neurons' parameters
for (int i = 0; i < numLayers() - 1; i++) {
for (int c = 0; c < numNeurons(i); c++) {
cout << "(" << i << "-" << c << ") output: " << getOutput(i, c) << " BPerr: " << getBPerr(i, c) << endl;
for (int d = 0; d < numCon(i, c); d++) {
cout << getWeight(i, c, d) << " (" << getDeltaWeight(i, c, d) << ") ";
}
cout << endl;
}
}
}
else if (mode == "w") { // print every weight with the target reference
for (int i = 0; i < numLayers() - 1; i++) {
for (int c = 0; c < numNeurons(i); c++) {
cout << "(" << i << "-" << c << ")" << "Con: " << numCon(i, c) << " InCon: " << numInCon(i, c) << endl;
for (int d = 0; d < numCon(i, c); d++) {
cout << getWeight(i, c, d) << " (" << getConTargetLyr(i, c, d) << "," << getConTargetCol(i, c, d) << ") ";
}
cout << endl;
}
}
}
else if (mode == "fc") { // print the mean weights between layer pairs
vector<float> wLyr(nLayers - 1); // sum of weights grouped by target layer
vector<int> nwLyr(nLayers - 1); // count of weights per group
float wl = 0;
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
// group by target layer; index shifted by 1 (layer 0 has no in-arcs)
wLyr[Layers[i].Neurons[j].OutArcs[k].target->layer - 1] += Layers[i].Neurons[j].OutArcs[k].weight;
nwLyr[Layers[i].Neurons[j].OutArcs[k].target->layer - 1]++;
}
}
cout << "Layer " << i << " weights: ";
for (int j = 0; j < wLyr.size(); j++) {
// NOTE(review): division by zero when a group is empty (nwLyr[j] == 0)
wLyr[j] /= nwLyr[j];
if (j + 1 > i)cout << "(" << i << ", " << j + 1 << ") [" << nwLyr[j] << "] " << wLyr[j] << " ";
}
cout << endl;
fill(wLyr.begin(), wLyr.end(), 0);
fill(nwLyr.begin(), nwLyr.end(), 0);
}
}
else {
cout << "WeightsStamp() argument error!" << endl;
}
cout << endl;
}
// Applies the sigmoid in place to the accumulated output of every neuron
// in layer lyr.
void sigLayer(int lyr) {
for (int n = 0; n < Layers[lyr].numNeurons; n++) {
float &out = Layers[lyr].Neurons[n].output;
out = sigmoid(out);
}
}
// Adds each neuron's bias ("bayes") to its accumulated output in layer lyr.
// When absSum is true the absolute bias is also folded into absOutSum
// (used by the profiling pass).
void bayesLayer(int lyr, bool absSum = false) {
for (int n = 0; n < Layers[lyr].numNeurons; n++) {
Layers[lyr].Neurons[n].output += Layers[lyr].Neurons[n].bayes;
}
if (absSum) {
for (int n = 0; n < Layers[lyr].numNeurons; n++) {
Layers[lyr].Neurons[n].absOutSum += abs(Layers[lyr].Neurons[n].bayes);
}
}
}
// Zeroes the accumulated output ("potential") of every neuron in the net.
void resetPotential() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].output = 0;
}
}
}
// Zeroes every neuron's running sum of absolute input contributions
// (absOutSum), accumulated by the profiling pass.
void resetAbsSumPotenzial() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].absOutSum = 0;
}
}
}
// Zeroes every neuron's absDeltaSum accumulator.
// (The original comment was a copy-paste of resetAbsSumPotenzial's.)
void resetAbsSumDelta() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].absDeltaSum = 0;
}
}
}
// Zeroes the back-propagated error stored in every neuron.
void resetBPerr() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
Layers[lyr].Neurons[col].BPerr = 0;
}
}
}
// Rewrites every neuron's (layer, column) coordinates from its actual
// position inside the Layers structure.
void resetNeuronsID() {
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < Layers[lyr].numNeurons; col++) {
Layers[lyr].Neurons[col].layer = lyr;
Layers[lyr].Neurons[col].column = col;
}
}
}
vector<conMap> saveConsTowardsLyr(int Layer) { // salva su vettore i riferimenti numerici delle connesioni verso il layer specificato
vector<conMap> connections;
conMap con;
for (int i = 0; i < Layer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == Layer) {
con.startCol = j;
con.startLyr = i;
con.arcRef = k;
con.targetCol = Layers[i].Neurons[j].OutArcs[k].target->column;
con.targetLyr = Layers[i].Neurons[j].OutArcs[k].target->layer;
connections.push_back(con);
}
}
}
}
return connections;
}
void loadConsTowardsLyr(vector<conMap> con) { // ricarica i riferimenti numerici delle connesioni verso il layer specificato
for (int i = 0; i < con.size(); i++) {
Layers[con[i].startLyr].Neurons[con[i].startCol].OutArcs[con[i].arcRef].target = getTarget(con[i].targetLyr, con[i].targetCol);
}
}
void ClearDataset() { examples.clear(); } // drops every loaded example
// Generates a synthetic dataset of nExe examples with nIn inputs and nOut
// outputs each, built from a sine time series shifted by 'offset'.
// type: 0 = constant debug values, 1 = prediction (outputs continue the
// input series), 2 = identity (outputs repeat the inputs), 3 = mirror
// (inputs filled in reverse order). 'step' is the sampling step; x tracks
// the series start of each example, x2 the current sample position.
void genTestDataset(int nExe, int nIn, int nOut, float step, int type, float offset) { // generates a sine time series (DEBUG)
cout << "creating examples.." << endl;
examples.resize(nExe);
float x = 0, x2 = 0;
for (int i = 0; i < nExe; i++) {
examples[i].input.resize(nIn);
examples[i].Doutput.resize(nOut);
switch (type) {
case 0: // debug dataset: constant inputs and outputs
for (int j = nIn - 1; j >= 0; j--) {
examples[i].input[j] = 1 + offset;
}
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = 1 + offset;
}
break;
case 1: // prediction: outputs continue the input series
for (int j = 0; j < nIn; j++) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
case 2: // identity: outputs restart from the same position as the inputs
for (int j = 0; j < nIn; j++) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
x2 = x;
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
case 3: // mirror: inputs written in reverse index order
for (int j = nIn - 1; j >= 0; j--) {
examples[i].input[j] = sin(x2) + offset;
x2 += step;
}
x2 = x;
for (int j = 0; j < nOut; j++) {
examples[i].Doutput[j] = sin(x2) + offset;
x2 += step;
}
x += step;
x2 = x;
break;
}
}
}
// Declares the output mapping: every output neuron maps the sigmoid's
// (0,1) range onto [min, max].
void setNetMap(float max, float min) {
map.resize(Layers[nLayers - 1].numNeurons);
for (int i = 0; i < map.size(); i++) {
map[i].minValue = min;
map[i].maxValue = max;
}
}
// Inverse of the output mapping: converts a real-world value of output
// neuron 'neur' back into the network's (0,1) range.
float reverseMap(int neur, float val) {
const float lo = map[neur].minValue;
const float hi = map[neur].maxValue;
return (val - lo) / (hi - lo);
}
// Builds the vector {start, start+1, ..., start+in-1} and shuffles it,
// producing a random access order for a second vector.
vector<u_int> casualVector(int in, int start = 0) {
vector<u_int> order(in);
for (int i = 0; i < (int)order.size(); i++) {
order[i] = i + start;
}
random_shuffle(order.begin(), order.end());
return order;
}
/*vector<T, A>*/
// Shuffles the elements of a vector in place.
// BUGFIX: the parameter was declared 'const&', so random_shuffle could
// never mutate it (const iterators) and any instantiation of this template
// failed to compile. The vector is now taken by mutable reference, which
// matches the documented intent ("mescolamento degli elementi").
template<typename T, typename A>
void shackeVector(vector<T, A> &vec) {
random_shuffle(vec.begin(), vec.end());
}
// Assigns a unique sequential index (neurIdx) to every neuron, walking
// layers front to back and columns top to bottom.
void refreshNeurIdx() {
int next = 0;
for (int lyr = 0; lyr < nLayers; lyr++) {
for (int col = 0; col < numNeurons(lyr); col++) {
getTarget(lyr, col)->neurIdx = next;
next++;
}
}
}
// Shifts every desired output in the dataset by a constant offset
// (example inputs are left untouched).
void datasetOffset(float offset) {
for (int e = 0; e < examples.size(); e++) {
for (int k = 0; k < examples[e].Doutput.size(); k++) {
examples[e].Doutput[k] += offset;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////ACCESSO A VARIABILI PRIVATE///////////////////////////////////////////////////
// --- thin accessors to the private network structure ---
int numLayers() { return nLayers; } // total layer count
int numNeurons(int Layer) { return Layers[Layer].numNeurons; } // neurons in a layer
int numCon(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].numOutArcs; } // outgoing arcs of a neuron
int numInCon(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].numInArcs; } // incoming arcs of a neuron
int getConTargetLyr(int Layer, int Neuron, int Arc) { return (int)Layers[Layer].Neurons[Neuron].OutArcs[Arc].target->layer; } // target layer of an arc
int getConTargetCol(int Layer, int Neuron, int Arc) { return (int)Layers[Layer].Neurons[Neuron].OutArcs[Arc].target->column; } // target column of an arc
float getWeight(int Layer, int Neuron, int Arc) { return Layers[Layer].Neurons[Neuron].OutArcs[Arc].weight; } // weight of an arc
float getDeltaWeight(int Layer, int Neuron, int Arc) { return Layers[Layer].Neurons[Neuron].OutArcs[Arc].oldDelta; } // last weight delta of an arc
float getOutput(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].output; } // neuron activation
float getBPerr(int Layer, int Neuron) { return Layers[Layer].Neurons[Neuron].BPerr; } // back-propagated error
ptNeuron getTarget(int Layer, int Neuron) { return &(Layers[Layer].Neurons[Neuron]); } // pointer to a neuron
ptNeuron getConTarget(int Layer, int Neuron, int Conn) { return Layers[Layer].Neurons[Neuron].OutArcs[Conn].target; } // pointer to an arc's target
// Returns the index, within base's outgoing arcs, of the first connection
// pointing at 'target'; prints an error and returns -1 when absent.
int getOutConTargetID(ptNeuron base, ptNeuron target) {
int id = -1;
for (int i = 0; i < base->numOutArcs && id < 0; i++) {
if (base->OutArcs[i].target == target) id = i;
}
if (id < 0) cout << "connessione non trovata ERRORE!" << endl;
return id;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////WINDOWS HIGH SPEED TIMING//////////////////////////////////////////////////////
// Forward declarations of the Win32 high-resolution counter API
// (normally supplied by <windows.h>).
BOOL WINAPI QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
BOOL WINAPI QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
// Current value of the high-resolution performance counter, in ticks.
inline long long PerformanceCounter() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceCounter(&li);
return li.QuadPart;
}
// Counter frequency, in ticks per second.
inline long long PerformanceFrequency() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceFrequency(&li);
return li.QuadPart;
}
/* HOW TO USE:
long long t0 = PerformanceCounter();
//code to bench..
long long t1 = PerformanceCounter();
double elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class MLP : public Network {
protected:
//Hopfield *Supporter;
public:
float NetPerformance = 0; // mean execution time, in milliseconds
float NetErrPercent = 0; // mean percentage error of the network
MLP(string filename) :Network(filename) {}; // build/load via the Network base constructor
////////////////////////////////////////////////FUNZIONI CREAZIONE RETE///////////////////////////////////////////////////////
//CREAZIONE RETE QUADRATA
// Builds a layered feed-forward net: 'input' neurons in layer 0,
// 'Ncolumns' per hidden layer, 'output' in the last layer; each layer is
// connected only to the next one.
// c == true -> "random" weight init, c == false -> all weights = initValue.
// NOTE(review): with initWeight reset to 0 first, 'abs(initWeight) > 0.05'
// is false immediately, so the init loops below never execute and every
// weight ends up 0 when c is true -- looks like an intended do-while (or an
// inverted comparison); confirm before relying on random init.
void qubeNet(int Nlayers, int Ncolumns, int input, int output, bool c, float initValue = 0.01f) { //TODO account for input/output neuron counts
// tested and working
// Nlayers  - number of layers of the net
// Ncolumns - number of neurons per hidden layer
// c        - true: pseudo-random weights, false: all weights = initValue
Layers.resize(Nlayers, Layer()); // declare the number of layers
nLayers = Nlayers;
cout << "Declaring structure...." << endl;
// input layer
Layers[0].numNeurons = input;
Layers[0].Neurons.resize(input, Neuron());
nNeurons += input;
for (int j = 0; j < input; j++) {
Layers[0].Neurons[j].OutArcs.resize(Ncolumns, arc());
Layers[0].Neurons[j].numOutArcs = Ncolumns;
Layers[0].Neurons[j].layer = 0;
Layers[0].Neurons[j].column = j;
nArc += Ncolumns;
}
for (int i = 1; i < Nlayers - 1; i++) {
// hidden layers
Layers[i].numNeurons = Ncolumns;
Layers[i].Neurons.resize(Ncolumns, Neuron());
nNeurons += Ncolumns;
for (int j = 0; j < Ncolumns; j++) {
if (i < Nlayers - 2) {
Layers[i].Neurons[j].OutArcs.resize(Ncolumns, arc());
Layers[i].Neurons[j].numOutArcs = Ncolumns;
nArc += Ncolumns;
}
else {
// last hidden layer connects to the output layer instead
Layers[i].Neurons[j].OutArcs.resize(output, arc());
Layers[i].Neurons[j].numOutArcs = output;
nArc += output;
}
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
// output layer (no outgoing arcs)
Layers[Nlayers - 1].numNeurons = output;
Layers[Nlayers - 1].Neurons.resize(output, Neuron());
nNeurons += output;
for (int j = 0; j < output; j++) {
Layers[Nlayers - 1].Neurons[j].layer = Nlayers - 1;
Layers[Nlayers - 1].Neurons[j].column = j;
}
float initWeight = initValue;
float Q = sqrt(3)*sqrt(2);
int M = 10;
cout << "initializing wheights...." << endl;
// input layer -> first hidden layer
for (int j = 0; j < input; j++) {
for (int k = 0; k < Ncolumns; k++) {
// NOTE(review): re-seeding rand() with clock() inside the loop -- see header note
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[k].target = &(Layers[1].Neurons[k]);
Layers[0].Neurons[j].OutArcs[k].weight = initWeight;
Layers[1].Neurons[k].numInArcs++;
}
}
// hidden layer i -> hidden layer i+1
for (int i = 1; i < Nlayers - 2; i++) {
//cout << "Layer:" << i << endl;
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < Ncolumns; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[k].target = &(Layers[i + 1].Neurons[k]);
Layers[i].Neurons[j].OutArcs[k].weight = initWeight;
Layers[i + 1].Neurons[k].numInArcs++;
}
}
}
// last hidden layer -> output layer
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < output; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() / 0.7556); initWeight = (rand() % M) / Q; } }
Layers[Nlayers - 2].Neurons[j].OutArcs[k].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[Nlayers - 2].Neurons[j].OutArcs[k].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
refreshNeurIdx();
}
//CREAZIONE RETE QUADRATA COMPLETAMENTE CONNESSA
// Builds a FULLY-connected layered net: every neuron connects to every
// neuron of every later layer (not just the next one).
// c == true -> "random" weight init, c == false -> all weights = initValue.
// NOTE(review): same init-loop issue as qubeNet() -- with initWeight reset
// to 0 the 'while (abs(initWeight) > 0.05)' body never runs, leaving the
// weights at 0 when c is true; confirm intended.
void qubeNetFC(int Nlayers, int Ncolumns, int input, int output, bool c, float initValue = 0.01f) {
//TODO account for input/output neuron counts
// tested and working
// Nlayers  - number of layers of the net
// Ncolumns - number of neurons per hidden layer
// c        - true: pseudo-random weights, false: all weights = initValue
Layers.resize(Nlayers, Layer()); // declare the number of layers
nLayers = Nlayers;
cout << "Declaring structure...." << endl;
// input layer: each neuron feeds every hidden and output neuron
Layers[0].numNeurons = input;
Layers[0].Neurons.resize(input, Neuron());
nNeurons += input;
int numA = (nLayers - 2)*Ncolumns + output; // arcs per input neuron
nArc += input * numA;
for (int j = 0; j < input; j++) {
Layers[0].Neurons[j].OutArcs.resize(numA, arc());
Layers[0].Neurons[j].numOutArcs = numA;
Layers[0].Neurons[j].layer = 0;
Layers[0].Neurons[j].column = j;
}
for (int i = 1; i < Nlayers - 1; i++) {
// hidden layers: arcs to every later hidden neuron plus the outputs
numA = (Nlayers - i - 2)*Ncolumns + output;
Layers[i].numNeurons = Ncolumns;
Layers[i].Neurons.resize(Ncolumns, Neuron());
nNeurons += Ncolumns;
for (int j = 0; j < Ncolumns; j++) {
if (i < Nlayers - 2) {
Layers[i].Neurons[j].OutArcs.resize(numA, arc());
Layers[i].Neurons[j].numOutArcs = numA;
nArc += numA;
}
else {
// last hidden layer only reaches the output layer
Layers[i].Neurons[j].OutArcs.resize(output, arc());
Layers[i].Neurons[j].numOutArcs = output;
nArc += output;
}
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
// output layer (no outgoing arcs)
Layers[Nlayers - 1].numNeurons = output;
Layers[Nlayers - 1].Neurons.resize(output, Neuron());
nNeurons += output;
for (int j = 0; j < output; j++) {
Layers[Nlayers - 1].Neurons[j].layer = Nlayers - 1;
Layers[Nlayers - 1].Neurons[j].column = j;
}
float initWeight = initValue;
float Q = sqrt(3)*sqrt(2);
int M = 10;
int arcRef = 0;
cout << "initializing weights...." << endl;
// wire the input layer: hidden targets first, then the output layer
for (int j = 0; j < input; j++) {
for (int k = 0; k < (Nlayers - 2); k++) {
for (int n = 0; n < Ncolumns; n++) {
numA = k * Ncolumns + n; // flat arc index for hidden target (k+1, n)
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[numA].target = &(Layers[k + 1].Neurons[n]);
Layers[0].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[k + 1].Neurons[n].numInArcs++;
}
}
for (int n = 0; n < output; n++) {
numA = (Nlayers - 2) * Ncolumns + n; // arc index for output target n
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() - 80); initWeight = (rand() % M) / Q; } }
Layers[0].Neurons[j].OutArcs[numA].target = &(Layers[Nlayers - 1].Neurons[n]);
Layers[0].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[Nlayers - 1].Neurons[n].numInArcs++;
}
}
// wire the intermediate layers to every later layer
for (int i = 1; i < Nlayers - 2; i++) {
//cout << "Layer:" << i << endl;
for (int j = 0; j < Ncolumns; j++) {
for (int k = i + 1; k < Nlayers - 1; k++) {
for (int w = 0; w < Ncolumns; w++) {
numA = (k - i - 1)*Ncolumns + w; // flat arc index for hidden target (k, w)
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[numA].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[k].Neurons[w].numInArcs++;
}
}
for (int k = 0; k < output; k++) {
numA = (Nlayers - 2 - i)*Ncolumns + k; // arc index for output target k
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() + 50); initWeight = (rand() % M) / Q; } }
Layers[i].Neurons[j].OutArcs[numA].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[i].Neurons[j].OutArcs[numA].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
}
// wire the last hidden layer to the output layer
for (int j = 0; j < Ncolumns; j++) {
for (int k = 0; k < output; k++) {
if (c) { initWeight = 0; while (abs(initWeight) > 0.05) { srand(clock() / 0.7556); initWeight = (rand() % M) / Q; } }
Layers[Nlayers - 2].Neurons[j].OutArcs[k].target = &(Layers[Nlayers - 1].Neurons[k]);
Layers[Nlayers - 2].Neurons[j].OutArcs[k].weight = initWeight;
Layers[Nlayers - 1].Neurons[k].numInArcs++;
}
}
refreshNeurIdx();
}
//CREAZIONE RETE CUSTOM
// Builds a net with a caller-supplied neuron count per layer (Ncolumns)
// where each neuron connects to a fraction conFill of ALL the neurons in
// later layers, chosen in random order via casualVector().
// NOTE(review): the outer while loop terminates only once every randomized
// arc slot is matched; if the permutation sizes ever disagree it can spin
// forever -- assumes casualVector() output is consistent.
void customNet(int Nlayers, vector<int> Ncolumns, float conFill) {
//TODO also count the declared connections into nArc
if (Ncolumns.size() != Nlayers) { cout << "costumNet() FAILED! parameters error" << endl; return; } // error
// declare the layers
Layers.resize(Nlayers);
nLayers = Nlayers;
// declare the neurons of each layer
for (int i = 0; i < Nlayers; i++) {
Layers[i].Neurons.resize(Ncolumns[i]);
Layers[i].numNeurons = Ncolumns[i];
nNeurons += Ncolumns[i];
for (int j = 0; j < Layers[i].numNeurons; j++) {
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
// declare and randomly bind each neuron's outgoing connections
for (int i = 0; i < nLayers - 1; i++) {
cout << "Layer " << i << endl;
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "Neuron " << j << endl;
int nFrontNeurons = 0; for (int g = i + 1; g < nLayers; g++) nFrontNeurons += Layers[g].numNeurons;
// NOTE(review): conFill * nFrontNeurons is truncated toward zero
Layers[i].Neurons[j].numOutArcs = conFill * nFrontNeurons;
Layers[i].Neurons[j].OutArcs.resize(conFill* nFrontNeurons);
vector<u_int> OutArcAcces = casualVector(Layers[i].Neurons[j].numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
int x, y = 0;
while (y < OutArcAcces.size()) {
x = 0;
for (int k = i + 1; k < nLayers; k++) {
for (int w = 0; w < Layers[k].numNeurons; w++) {
if (y > OutArcAcces.size() - 1) continue;
if (NetAcces[y] == x) {
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].weight = 0.01f;
Layers[k].Neurons[w].numInArcs++;
y++; cout << y << endl;
}
x++;
}
}
}
}
}
refreshNeurIdx();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////STIMOLAZIONE RETE/////////////////////////////////////////////////////////////////
//procedura di propagazione dell'informazione
// Forward pass: loads 'input' into layer 0, propagates the weighted sums
// layer by layer (bias then sigmoid per layer), then maps the (0,1)
// activations of the last layer onto the configured [min, max] ranges and
// writes them into 'output'.
// NOTE(review): outputs are accumulated with '+=', so this assumes the
// potentials were reset (resetPotential()) before the call -- confirm.
void inputNet(vector<float> &input, vector<float> &output) {
if (input.size() != Layers[0].numNeurons) { cout << "il vettore in input deve avere dim " << Layers[0].numNeurons << endl; return; }
if (output.size() != Layers[nLayers - 1].numNeurons) { cout << "il vettore in output deve avere dim " << Layers[nLayers - 1].numNeurons << endl; return; }
// load the input vector into the first layer
for (int i = 0; i < Layers[0].numNeurons; i++) {
Layers[0].Neurons[i].output = input[i];
}
//sigLayer(0);
// propagate the stimulus through every connection
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
Layers[i].Neurons[j].OutArcs[k].target->output += Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight;
}
}
bayesLayer(i + 1);
sigLayer(i + 1);
}
float delta;
if (map.size() == 0) { cout << "Errore ... la mappatura degli output non è stata settata!!"; return; }
// map each output activation from (0,1) onto its [min, max] range
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
delta = map[i].maxValue - map[i].minValue;
output[i] = ((Layers[nLayers - 1].Neurons[i].output)*delta) + map[i].minValue;
Layers[nLayers - 1].Neurons[i].output = output[i];
}
}
// Forward pass that additionally records the propagation history: every neuron
// accumulates the absolute contributions it receives (absOutSum) and, once the
// pass is done, the normalized influence of each incoming connection is stored
// in its influenceInput vector (requires initVectorsProfiler to have run).
void inputNetProfiler(vector<float> &input, vector<float> &output) {
if (input.size() != Layers[0].numNeurons) { cout << "il vettore in input deve avere dim " << Layers[0].numNeurons << endl; return; }
if (output.size() != Layers[nLayers - 1].numNeurons) { cout << "il vettore in output deve avere dim " << Layers[nLayers - 1].numNeurons << endl; return; }
// load the input vector into the net
for (int i = 0; i < Layers[0].numNeurons; i++) {
Layers[0].Neurons[i].output = input[i];
//Layers[0].Neurons[i].absOutSum = abs(input[i]);
}
//sigLayer(0);
// propagate the stimulus, tracking the |contribution| sums used for normalization
for (int i = 0; i < nLayers - 1; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
Layers[i].Neurons[j].OutArcs[k].target->output += Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight;
Layers[i].Neurons[j].OutArcs[k].target->absOutSum += abs(Layers[i].Neurons[j].output*Layers[i].Neurons[j].OutArcs[k].weight);
}
}
bayesLayer(i + 1, true);
sigLayer(i + 1);
}
resetVectorsProfiler(true, false);// reset all the influenceInput vectors
// seed the influence propagation from the first hidden layer
int IDinf = 0;
for (int i = 0; i < Layers[1].numNeurons; i++) { // scan the neurons of the first hidden layer
for (int j = 0; j < Layers[0].numNeurons; j++) { // scan the neurons of the input layer
for (int k = 0; k < numCon(0, j); k++) { // scan the connections of the input-layer neuron
if (&Layers[1].Neurons[i] == Layers[0].Neurons[j].OutArcs[k].target) {
// influence = |this arc's contribution| / total |contributions| received by the target
Layers[1].Neurons[i].influenceInput[IDinf] = (abs(getTarget(0, j)->output*getTarget(0, j)->OutArcs[k].weight) / (getTarget(0, j)->OutArcs[k].target->absOutSum));
IDinf++;
}
}
}
IDinf = 0;
}
// compute the influence vectors for the remaining layers
for (int t = 1; t < nLayers - 1; t++) { // TODO(original): remove the "- 1"?
IDinf = 0;
for (int i = 0; i < Layers[t + 1].numNeurons; i++) {
for (int j = 0; j < t + 1; j++) { // any earlier layer may feed layer t+1
for (int w = 0; w < Layers[j].numNeurons; w++) {
for (int k = 0; k < numCon(j, w); k++) {
if (&Layers[t + 1].Neurons[i] == Layers[j].Neurons[w].OutArcs[k].target) {
Layers[t + 1].Neurons[i].influenceInput[IDinf] = (abs(getTarget(j, w)->output*getTarget(j, w)->OutArcs[k].weight) / (getTarget(j, w)->OutArcs[k].target->absOutSum));
IDinf++;
}
}
}
}
IDinf = 0;
}
}
float delta;
// the output mapping must have been configured before use
if (map.size() == 0) {
cout << "Errore ... la mappatura degli output non è stata settata!!" << endl;
return;
}
// de-normalize every output back into its [minValue, maxValue] range
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
delta = map[i].maxValue - map[i].minValue;
output[i] = ((Layers[nLayers - 1].Neurons[i].output)*delta) + map[i].minValue;
Layers[nLayers - 1].Neurons[i].output = output[i];
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////FUNZIONI DI ADDESTRAMENTO MLP////////////////////////////////////////////////////////
// Back-propagation training over the whole example set (with momentum).
// iter: maximum number of epochs; eps: learning rate; beta: momentum coefficient;
// ErrPercent: mean percentage error below which training stops early.
void BP(int iter, float eps, float beta, float ErrPercent) {
float err = 0; // error of the single neuron
float Err = 0; // squared error of the single example
float Gerr = 0; // squared error of the whole dataset
float Perr = 0; // mean percentage error accumulator
float delta = 0; // output-map range (maxValue - minValue)
long long t0 = 0; // per-example timing start
long long t1 = 0; // timing end
long long inT1 = 0; // NOTE(review): unused
double elapsedMilliseconds = 0;
double executionTime = 0; // mean per-example time (forward + backward)
double inputTime = 0; // mean forward-pass time
int x = 0; // index of the example whose outputs get printed this epoch
vector<float> Out(numNeurons(nLayers - 1)); // support vector for the net output
vector<float> GerrStory; // NOTE(review): unused
for (int t = 0; t < iter; t++) {
Gerr = 0;
Perr = 0;
for (int e = 0; e < examples.size(); e++) {
t0 = PerformanceCounter();
// run the example through the net
inputNet(examples[e].input, Out);
t1 = PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
inputTime += elapsedMilliseconds;
// compute the weight-correction delta for the output layer
Err = 0;
for (int r = 0; r < Out.size(); r++) {
delta = map[r].maxValue - map[r].minValue;
err = Out[r] - examples[e].Doutput[r];
Perr += abs(err / examples[e].Doutput[r]) * 100; // percentage error
Err += pow(err, 2);
// debug printout: x cycles backwards so a different example is shown each epoch
if (x == e && t > -1) {
cout << "es." << x << " Y" << r << " = " << Out[r] << " " << "D" << r << " = " << examples[e].Doutput[r] << " err: " << abs(Out[r] - examples[e].Doutput[r]) << endl;
if (r == Out.size() - 1) { x--; WeightsStamp("fc"); /*Sleep(5000);*/ }
if (x < 0) x = examples.size() - 1;
}
//mErr = ((err- map[r].minValue)/ delta) ;
// weight-variation delta: reverseMap presumably re-normalizes into map space — TODO confirm;
// the second factor is the logistic-sigmoid derivative out*(1-out)
Layers[nLayers - 1].Neurons[r].BPerr = reverseMap(r, err) * (reverseMap(r, Out[r]) *(1 - reverseMap(r, Out[r])));
// apply the corrections to the biases (gradient step + momentum)
Layers[nLayers - 1].Neurons[r].oldBayesDelta = (-eps * (Layers[nLayers - 1].Neurons[r].BPerr)) + beta * Layers[nLayers - 1].Neurons[r].oldBayesDelta;
Layers[nLayers - 1].Neurons[r].bayes += Layers[nLayers - 1].Neurons[r].oldBayesDelta;
}
// apply the corrections to the output-layer weights
for (int i = 0; i < numNeurons(nLayers - 2); i++) {
for (int j = 0; j < numCon(nLayers - 2, i); j++) {
Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta = (-eps * (Layers[nLayers - 2].Neurons[i].OutArcs[j].target->BPerr)*Layers[nLayers - 2].Neurons[i].output) + beta * Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
Layers[nLayers - 2].Neurons[i].OutArcs[j].weight += Layers[nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
}
}
// repeat the procedure for all the other layers
for (int i = nLayers - 2; i > 0; i--) { // from the next-to-last layer down to the second
// compute the weight-variation delta for layer i-1
for (int j = 0; j < numNeurons(i); j++) {
err = 0;
for (int k = 0; k < numCon(i, j); k++) err += Layers[i].Neurons[j].OutArcs[k].target->BPerr * Layers[i].Neurons[j].OutArcs[k].weight;
Layers[i].Neurons[j].BPerr = DsigOut(i, j)*err;
// apply the corrections to the biases
Layers[i].Neurons[j].oldBayesDelta = (-eps * (Layers[i].Neurons[j].BPerr)) + beta * Layers[i].Neurons[j].oldBayesDelta;
Layers[i].Neurons[j].bayes += Layers[i].Neurons[j].oldBayesDelta;
}
// apply the corrections to the weights of layer i-1
for (int j = 0; j < numNeurons(i - 1); j++) {
for (int k = 0; k < numCon(i - 1, j); k++) {
Layers[i - 1].Neurons[j].OutArcs[k].oldDelta = (-eps * (Layers[i - 1].Neurons[j].OutArcs[k].target->BPerr)*Layers[i - 1].Neurons[j].output) + beta * Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
Layers[i - 1].Neurons[j].OutArcs[k].weight += Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
}
}
}
Gerr += Err;
resetPotential(); // clear the net's output potentials
t1 = PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
executionTime += elapsedMilliseconds;
}
Gerr /= 2;
Perr /= examples.size()*Out.size();
executionTime /= examples.size();
inputTime /= examples.size();
NetPerformance = inputTime;
NetErrPercent = Perr;
cout << "Iterazione " << t << " errore quadratico: " << Gerr << " errore percentuale medio: " << Perr << " %err exemple time: " << executionTime << " ms inputTime: " << inputTime << " ms" << endl;
//if (t > 100 && t < 200)Sleep(500);
// early stop once the mean percentage error drops under the target
if (Perr < ErrPercent) {
cout << "Percentuale di errore obbiettivo raggiunta!" << endl;
return;
}
}
}
// Runs one backpropagation step for a single example.
// TODO: not implemented — the body is intentionally empty.
void oneBP(float eps, float beta, example e) {
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////ALTRE FUNZIONI MLP///////////////////////////////////////////////////////////
// Allocates and zeroes the per-neuron profiling vectors:
// influenceOutput has one entry per outgoing arc (absent on the output layer),
// influenceInput has one entry per incoming arc (absent on the input layer).
void initVectorsProfiler() {
	// input layer: forward-influence entries only
	for (int c = 0; c < Layers[0].numNeurons; c++) {
		auto &n = Layers[0].Neurons[c];
		n.influenceOutput.assign(n.numOutArcs, 0);
	}
	// hidden layers: entries for both directions
	for (int l = 1; l < nLayers - 1; l++) {
		for (int c = 0; c < Layers[l].numNeurons; c++) {
			auto &n = Layers[l].Neurons[c];
			n.influenceInput.assign(n.numInArcs, 0);
			n.influenceOutput.assign(n.numOutArcs, 0);
		}
	}
	// output layer: input-influence entries only
	for (int c = 0; c < Layers[nLayers - 1].numNeurons; c++) {
		auto &n = Layers[nLayers - 1].Neurons[c];
		n.influenceInput.assign(n.numInArcs, 0);
	}
}
// Zeroes the requested profiling vectors on every neuron.
// inInfl selects the influenceInput vectors (the input layer has none, so it is
// skipped); outInfl selects the influenceOutput vectors (output layer skipped).
void resetVectorsProfiler(bool inInfl, bool outInfl) {
	for (int l = 0; l < nLayers; l++) {
		for (int c = 0; c < Layers[l].numNeurons; c++) {
			auto &n = Layers[l].Neurons[c];
			if (inInfl && l > 0)
				fill(n.influenceInput.begin(), n.influenceInput.end(), 0);
			if (outInfl && l < nLayers - 1)
				fill(n.influenceOutput.begin(), n.influenceOutput.end(), 0);
		}
	}
}
// Prints the input-influence profile as percentages: for each neuron, how much every
// incoming connection contributed to its output (from influenceInput, filled by
// inputNetProfiler). all == false prints the output layer only; all == true prints
// every neuron, resolving each entry to its source neuron's (layer, column).
void stampInputInfluences(bool all = false) {
cout << endl;
if (all == false) {
for (int i = 0; i < Layers[nLayers - 1].numNeurons; i++) {
cout << "Output (" << i << "): ";
for (int j = 0; j < Layers[nLayers - 1].Neurons[i].influenceInput.size(); j++) {
cout << "infl n in(" << j << "): " << Layers[nLayers - 1].Neurons[i].influenceInput[j] * 100 << "% || ";
}
cout << endl;
}
}
else {
int l = 0;
int c = 0;
for (int i = 0; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "neuron (" << i << ", " << j << ") input influence: ";
for (int k = 0; k < Layers[i].Neurons[j].influenceInput.size(); k++) {
// resolve the k-th influence entry back to the source neuron's coordinates
l = basePosfromtarget(getTarget(i, j), k)->layer;
c = basePosfromtarget(getTarget(i, j), k)->column;
cout << " (" << l << ", " << c << "): " << Layers[i].Neurons[j].influenceInput[k] * 100 << "% || ";// unlike the first branch, this shows the source neuron, not just the positional arc number
}
cout << endl;
}
}
}
}
// Prints how the output errors were propagated back to each neuron, as percentages
// (from influenceOutput). all == false prints the input layer only; all == true
// prints every neuron.
void stampOutputErrorPropagation(bool all = false) {
cout << endl;
if (all == false) {
for (int i = 0; i < Layers[0].numNeurons; i++) {
cout << "input (" << i << ") BPerrore : ";
for (int j = 0; j < Layers[0].Neurons[i].influenceOutput.size(); j++) {
cout << " (" << j << "): " << Layers[0].Neurons[i].influenceOutput[j] * 100 << "% || ";
}
cout << endl;
}
}
else {
for (int i = 0; i < nLayers; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
cout << "neuron (" << i << ", " << j << "): ";
for (int k = 0; k < Layers[i].Neurons[j].influenceOutput.size(); k++) {
cout << "BPerr n out(" << k << "): " << Layers[i].Neurons[j].influenceOutput[k] * 100 << "% || "; // shows only the positional arc number, not the correlated neuron
}
cout << endl;
}
}
}
}
// Given a target neuron and the index k of an entry in its influenceInput vector,
// returns a pointer to the source ("base") neuron owning that connection, or
// nullptr when k is out of range. Entries are counted in layer/column/arc order
// over all layers preceding the target's layer — the same order the profiler uses.
ptNeuron basePosfromtarget(ptNeuron target, int k) {
	int found = 0; // running count of connections ending on `target`
	for (int l = 0; l < target->layer; l++) {
		for (int c = 0; c < Layers[l].numNeurons; c++) {
			for (int a = 0; a < numCon(l, c); a++) {
				if (getConTarget(l, c, a) == target && found++ == k)
					return getTarget(l, c);
			}
		}
	}
	return nullptr;
}
// Given a base neuron and the index of one of its outgoing arcs, returns the position
// that connection occupies inside the TARGET neuron's influenceInput vector, or -1
// when not found. Connections are counted in the same layer/column/arc order used
// by the profiler.
// NOTE(review): if `base` had more than one arc toward the same target, the first
// match would be returned regardless of `arc` — confirm duplicates cannot occur.
int idBaseConReftoTargetInfl(ptNeuron base, int arc) {
ptNeuron target = base->OutArcs[arc].target;
int k2 = -1; // running position among all connections ending on `target`
for (int i = 0; i < target->layer; i++) {
for (int j = 0; j < numNeurons(i); j++) {
for (int k = 0; k < numCon(i, j); k++) {
if (target == getConTarget(i, j, k)) {
k2++;
if (base == getTarget(i, j)) {
return k2;
}
}
}
}
}
return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
class Hopfield : public Network {
public:
MLP* mlp; // puntatore alla rete da supportare
vector<interArc> binds; // connessioni tra rete mlp e Hopfiled
Hopfield(string filename, MLP *target = NULL) :Network(filename) {
if (target != NULL) {
mlp = target;
supportNet();
}
}
////////////////////////////////////////////////FUNZIONI CREAZIONE RETE Hopfield///////////////////////////////////////////////
//Rete ad anello //RICORDA i pesi sono settati automaticamente a 1 TODO implementa controllo da parametro
void toroidNet(int Nlayers, vector<int> Ncolumns, float conFill) {
//sviluppo simile alle reti quadrate FC ma con connessioni anche tra i neuroni di uno stesso strato
if (Ncolumns.size() != Nlayers) { cout << "ringNet() FAILED! parameters error" << endl; return; } //errore
//dichiarazione layers
Layers.resize(Nlayers);
nLayers = Nlayers;
//dichiarazione neuroni per layer
for (int i = 0; i < Nlayers; i++) {
Layers[i].Neurons.resize(Ncolumns[i]);
Layers[i].numNeurons = Ncolumns[i];
nNeurons += Ncolumns[i];
for (int j = 0; j < Layers[i].numNeurons; j++) {
Layers[i].Neurons[j].layer = i;
Layers[i].Neurons[j].column = j;
}
}
//dichiarazione connessioni in uscita di ogni neurone
for (int i = 0; i < nLayers; i++) {
//cout << "Layer " << i << endl;
for (int j = 0; j < Layers[i].numNeurons; j++) {
//cout << "Neuron " << j << endl;
int nFrontNeurons = 0; for (int g = i; g < nLayers; g++) nFrontNeurons += Layers[g].numNeurons;
Layers[i].Neurons[j].numOutArcs = conFill * nFrontNeurons;
Layers[i].Neurons[j].OutArcs.resize(conFill* nFrontNeurons);
vector<u_int> OutArcAcces = casualVector(Layers[i].Neurons[j].numOutArcs);
vector<u_int> NetAcces = casualVector(nFrontNeurons);
int x, y = 0;
while (y < OutArcAcces.size()) {
x = 0;
for (int k = i; k < nLayers; k++) {
for (int w = 0; w < Layers[k].numNeurons; w++) {
if (y > OutArcAcces.size() - 1) continue;
if (NetAcces[y] == x) {
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].target = &(Layers[k].Neurons[w]);
Layers[i].Neurons[j].OutArcs[OutArcAcces[y]].weight = 1.0f;
Layers[k].Neurons[w].numInArcs++;
y++; cout << y << endl;
}
x++;
}
}
}
//TOPPA elimino le connessioni dei neuroni con se stessi
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == i) {
if (Layers[i].Neurons[j].OutArcs[k].target->column == j) {
deleteArc(i, j, i, j);
}
}
}
}
}
}
//Genera una rete di supporto all'apprendimento su misura per una mlp
void supportNet() {
//creo la rete di supporto
nLayers = 1;
nNeurons = mlp->nNeurons;
nArc = mlp->nArc;
Layers.resize(1);
Layers[0].Neurons.resize(nNeurons);
Layers[0].numNeurons = nNeurons;
//collego la rete di supporto alla mlp
//e copio le connessioni dalla mlp sulla hopfield
binds.resize(mlp->nNeurons);
int bindPos = 0;
for (int i = 0; i < mlp->nLayers; i++) {
for (int j = 0; j < mlp->numNeurons(i); j++) {
binds[bindPos].base = getTarget(0, bindPos);
binds[bindPos].base->layer = 0;
binds[bindPos].base->column = bindPos;
binds[bindPos].target = mlp->getTarget(i, j);
binds[bindPos].base->numOutArcs = binds[bindPos].target->numOutArcs;
binds[bindPos].base->numInArcs = binds[bindPos].target->numInArcs;
binds[bindPos].base->OutArcs.resize(binds[bindPos].target->numOutArcs);
bindPos++;
}
}
for (int i = 0; i < binds.size(); i++) {
for (int k = 0; k < binds[i].target->numOutArcs; k++) {
int TargetPos = searchFromTarget(binds[i].target->OutArcs[k].target);
if (TargetPos != -1) {
binds[i].base->OutArcs[k].target = binds[TargetPos].base;
binds[i].base->OutArcs[k].weight = 1.0f;
}
}
}
}
//Genera una rete con connessioni completamente caotiche
void caoticNet() {} // TODO da sviluppare
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////FUNZIONI DI ADDESTRAMENTO//////////////////////////////////////////////////////
//la funzione considera i parametri della rete mlp associta per addestrare la memoria associativa
void trainSupportNet(float Wi, float We, int g) {
float infl = 0, BPerr = 0;
float Dinfl = 0, DbpErr = 0;
for (int i = 0; i < mlp->nLayers; i++) {
for (int j = 0; j < mlp->numNeurons(i); j++) {
for (int k = 0; k < mlp->numCon(i, j); k++) {
BPerr = mlp->getTarget(i, j)->influenceOutput[k];
infl = mlp->getTarget(i, j)->OutArcs[k].target->influenceInput[mlp->idBaseConReftoTargetInfl(mlp->getTarget(i, j), k)];
Dinfl = DeltaHW(Wi, infl, g);
DbpErr = DeltaHW(We, BPerr, g);
binds[searchFromTarget(mlp->getTarget(i, j))].base->OutArcs[k].weight += Dinfl + DbpErr;
}
}
}
}
//funzione che taglia gli archi con il peso sotto una certa soglia
void cutMadArc(int maxCut, int alfa) {
//!!!la procedura supporta soltanto reti ad anello su un solo layer!!!
//maxCut rappresenta il numero massimo di rami da tagliare in questa esecuzione
//alfa rappresenta il coefficiente di soglia percentuale sotto il quale il ramo puo essere tagliato (la soglia è una percentuale)
float Wsum = 0;
u_int Wn = 0;
float Wmax = std::numeric_limits<float>::min();
float Wmin = std::numeric_limits<float>::max();
for (int i = 0; i < numNeurons(0); i++) {
for (int j = 0; j < numCon(0, i); j++) {
Wsum += getTarget(0, i)->OutArcs[j].weight;
if (getTarget(0, i)->OutArcs[j].weight > Wmax) { Wmax = getTarget(0, i)->OutArcs[j].weight; }
if (getTarget(0, i)->OutArcs[j].weight < Wmin) { Wmax = getTarget(0, i)->OutArcs[j].weight; }
Wn++;
}
}
nArc = Wn;
mlp->nArc = nArc;
float Wmean = Wsum / Wn;
float treashold = ((Wmax - Wmin)*alfa) + Wmin; // alfa = 0 -> Treashold = Wmin , alfa = 1 -> Treashold = Wmax
int Tcol = 0, TmlpL = 0, TmlpC = 0, mlpL = 0, mlpC = 0;
vector <u_int> acs = casualVector(nNeurons - mlp->numNeurons(mlp->nLayers - 1));
for (int i = 0; i < acs.size(); i++) {
for (int j = 0; j < getTarget(0, acs[i])->numOutArcs; j++) {
if (getTarget(0, acs[i])->OutArcs[j].weight < treashold) { // condizione sufficiente all'eliminazione
Tcol = getTarget(0, acs[i])->OutArcs[j].target->column; //colonna del neurone Hopfield da eliminare
TmlpL = binds[serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)].target->layer; // numLayer del neurone mlp da eliminare
TmlpC = binds[serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)].target->column; // numCol del neurone mlp da eliminare
mlpL = binds[serchFromBase(getTarget(0, acs[i]))].target->layer; // numLayer del neurone mlp contenente la connessione
mlpC = binds[serchFromBase(getTarget(0, acs[i]))].target->column; // numCol del neurone mlp contenente la connessione
// procedura di eliminazione arco
if (getTarget(0, acs[i])->OutArcs[j].target->numInArcs == 1) { // se il neurone obbiettivo ha solo l'arco da eliminare elimino anche il corrispondente elemento bind
//binds.erase(binds.begin() + serchFromBase(getTarget(0, acs[i])->OutArcs[j].target)); //cerco e elimino l'elemento bind dei due neuroni
//deleteArc(0, acs[i], 0, Tcol); // elimino l'arco nella rete Hopfield
//mlp->deleteArc(mlpL, mlpC, TmlpL, TmlpC); //elimino l'arco nella rete mlp
//TODO utilizza funzioni apposite per l'eliminazione dei neuroni delle due reti
nArc++;
}
else { //altrimenti elimino semplicemente l'arco nelle due reti
deleteArc(0, acs[i], 0, Tcol); // elimino l'arco nella rete Hopfield
mlp->deleteArc(mlpL, mlpC, TmlpL, TmlpC); //elimino l'arco nella rete mlp
}
nArc--;
mlp->nArc = nArc;
maxCut--;
if (maxCut < 1) return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////FUNZIONI MODIFICA RETE///////////////////////////////////////////////////////////
void HdeleteArc(int Nlayer, int Ncolumn, int targetLayer, int targetColumn) {
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer == targetLayer && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column == targetColumn) {
//modifico i parametri della rete relazionati a tale arco
Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs--;
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->numInArcs == 0 && Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer != 0) {
deleteNeuron(Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->layer, Layers[Nlayer].Neurons[Ncolumn].OutArcs[i].target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + i);
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
return;
}
}
cout << "errore: arco (" << Nlayer << "-" << Ncolumn << ") -> (" << targetLayer << "-" << targetColumn << ") non trovato, eliinazione fallita" << endl;
}
void HdeleteArcByRef(int Nlayer, int Ncolumn, arc *targetArc) { //elimina un arco con gli indici della base e il puntatore alla connsessione
//modifico i parametri della rete relazionati a tale arco
targetArc->target->numInArcs--;
if (targetArc->target->influenceInput.size() > 0)targetArc->target->influenceInput.pop_back();
if (Layers[Nlayer].Neurons[Ncolumn].influenceOutput.size() > 0)Layers[Nlayer].Neurons[Ncolumn].influenceOutput.pop_back();
if (targetArc->target->numInArcs == 0) {
deleteNeuron(targetArc->target->layer, targetArc->target->column);
}
else {
Layers[Nlayer].Neurons[Ncolumn].OutArcs.erase(Layers[Nlayer].Neurons[Ncolumn].OutArcs.begin() + getOutConTargetID(getTarget(Nlayer, Ncolumn), targetArc->target));
Layers[Nlayer].Neurons[Ncolumn].numOutArcs--;
}
}
void HdeleteNeuron(int Nlayer, int Ncolumn) {
// elimino l'arco dai neuroni che puntano al nodo da eliminare e
// decremento il numero di archi in output hai neuroni con come target il neurone da eliminare
for (int i = 0; i < Nlayer; i++) {
for (int j = 0; j < Layers[i].numNeurons; j++) {
for (int k = 0; k < Layers[i].Neurons[j].numOutArcs; k++) {
if (Layers[i].Neurons[j].OutArcs[k].target->layer == Nlayer && Layers[i].Neurons[j].OutArcs[k].target->column == Ncolumn) {
deleteArcByRef(i, j, &Layers[i].Neurons[j].OutArcs[k]);
}
}
}
}
// eliminio gli archi del neurone da eliminare
for (int i = 0; i < Layers[Nlayer].Neurons[Ncolumn].numOutArcs; i++) {
deleteArcByRef(Nlayer, Ncolumn, &Layers[Nlayer].Neurons[Ncolumn].OutArcs[i]);
}
// decremento l'indice colonna dei neuroni successivi nello stesso layer
for (int i = Ncolumn + 1; i < Layers[Nlayer].numNeurons; i++) Layers[Nlayer].Neurons[i].column--;
// elimino il neurone
Layers[Nlayer].Neurons.erase(Layers[Nlayer].Neurons.begin() + Ncolumn);
Layers[Nlayer].numNeurons--;
nNeurons--;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////ALTRE FUNZIONI/////////////////////////////////////////////////////////////////
int serchFromBase(ptNeuron base) {
for (int i = 0; i < binds.size(); i++) {
if (base == binds[i].base) {
return i;
}
}
return -1;
}
int searchFromTarget(ptNeuron target) {
for (int i = 0; i < binds.size(); i++) {
if (target == binds[i].target) {
return i;
}
}
return -1;
}
//funzione per il condizionamento dei valori dei vettori di retropropagazione
float DeltaHW(float W, float x, float g) {
return W * powf((2 * x - 1), (1 + 2 * g));
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
// Couples an MLP with a Hopfield support net to drive structural learning:
// backpropagation training interleaved with statistics-driven arc pruning.
class StructuralLearning {
private:
MLP * mlp; // mlp net being trained
Hopfield* hpd; // Hopfield support net
public:
// Binds the two nets and sizes the mlp's profiling vectors.
StructuralLearning(MLP* ptMlp, Hopfield* ptHpd) { // constructor
mlp = ptMlp;
hpd = ptHpd;
mlp->initVectorsProfiler();
}
// Backpropagation with structural learning: each example is run through the
// profiling forward pass, BP updates weights/biases, the Hopfield net accumulates
// influence and backpropagated-error statistics, and after `cutStart` epochs one
// low-weight arc per epoch is pruned from both nets.
// iter/eps/beta/ErrPercent: standard BP parameters (epochs, learning rate, momentum, target error %).
// Wi/We/g: support-net training parameters (see Hopfield::trainSupportNet).
// cutStart: epoch after which pruning starts; alfa: pruning threshold coefficient;
// SmaxErr: error ceiling that aborts pruning; maxWloss: max % of weights removable.
void StructuralBP(int iter, float eps, float beta, float ErrPercent, float Wi, float We, int g, int cutStart, float alfa, float SmaxErr, float maxWloss) {
int nWt0 = mlp->nArc; // number of weights at time t0
int nWtn = 0; // number of weights left at the i-th iteration (NOTE(review): unused)
float WdelP = 0; // percentage of weights removed since training began
float err = 0; // error of the single neuron
float Err = 0; // squared error of the single example
float Gerr = 0; // squared error of the whole dataset
float Perr = 0; // mean percentage error accumulator
float delta = 0; // output-map range
long long t0 = 0;
long long t1 = 0;
long long inT1 = 0; // NOTE(review): unused
double elapsedMilliseconds = 0;
double executionTime = 0;
double inputTime = 0;
int x = 0; // index of the example printed this epoch
vector<float> Out(mlp->numNeurons(mlp->nLayers - 1)); // support vector for the net output
vector<float> GerrStory; // NOTE(review): unused
for (int t = 0; t < iter; t++) {
Gerr = 0;
Perr = 0;
for (int e = 0; e < mlp->examples.size(); e++) {
t0 = mlp->PerformanceCounter();
// run the example, recording the influence profile
mlp->inputNetProfiler(mlp->examples[e].input, Out);
t1 = mlp->PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / mlp->PerformanceFrequency();
inputTime += elapsedMilliseconds;
// compute the weight-correction delta for the output layer
Err = 0;
for (int r = 0; r < Out.size(); r++) {
delta = mlp->map[r].maxValue - mlp->map[r].minValue;
err = Out[r] - mlp->examples[e].Doutput[r];
Perr += abs(err / mlp->examples[e].Doutput[r]) * 100; // percentage error
Err += pow(err, 2);
// debug printout: x cycles backwards so a different example is shown each epoch
if (x == e && t > -1) {
cout << endl << "es." << x << " Y" << r << " = " << Out[r] << " " << "D" << r << " = " << mlp->examples[e].Doutput[r] << " err: " << abs(Out[r] - mlp->examples[e].Doutput[r]) << endl;
if (r == Out.size() - 1) {
x--;
mlp->WeightsStamp("fc");
/*mlp->stampInputInfluences(true);
mlp->stampOutputErrorPropagation(true);*/ // errors backpropagated in the previous iteration
/*Sleep(5000);*/
}
if (x < 0) x = mlp->examples.size() - 1;
}
mlp->resetAbsSumDelta();
//mErr = ((err- map[r].minValue)/ delta) ;
// weight-variation delta (re-normalized error times the sigmoid-derivative term)
mlp->Layers[mlp->nLayers - 1].Neurons[r].BPerr = mlp->reverseMap(r, err) * (mlp->reverseMap(r, Out[r]) *(1 - mlp->reverseMap(r, Out[r])));
// apply the corrections to the biases
mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta = (-eps * (mlp->Layers[mlp->nLayers - 1].Neurons[r].BPerr)) + beta * mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta;
mlp->Layers[mlp->nLayers - 1].Neurons[r].bayes += mlp->Layers[mlp->nLayers - 1].Neurons[r].oldBayesDelta;
}
// apply the corrections to the output-layer weights
// (absDeltaSum collects |delta| so the error-influence profile can be normalized)
for (int i = 0; i < mlp->numNeurons(mlp->nLayers - 2); i++) {
for (int j = 0; j < mlp->numCon(mlp->nLayers - 2, i); j++) {
mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta = (-eps * (mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].target->BPerr) * mlp->Layers[mlp->nLayers - 2].Neurons[i].output) + beta * mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].weight += mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta;
mlp->Layers[mlp->nLayers - 2].Neurons[i].absDeltaSum += abs(mlp->Layers[mlp->nLayers - 2].Neurons[i].OutArcs[j].oldDelta);
}
}
// repeat the procedure for all the other layers
for (int i = mlp->nLayers - 2; i > 0; i--) { // from the next-to-last layer down to the second
// compute the weight-variation delta for layer i-1
for (int j = 0; j < mlp->numNeurons(i); j++) {
err = 0;
for (int k = 0; k < mlp->numCon(i, j); k++) err += mlp->Layers[i].Neurons[j].OutArcs[k].target->BPerr * mlp->Layers[i].Neurons[j].OutArcs[k].weight;
mlp->Layers[i].Neurons[j].BPerr = mlp->DsigOut(i, j)*err;
// apply the corrections to the biases
mlp->Layers[i].Neurons[j].oldBayesDelta = (-eps * (mlp->Layers[i].Neurons[j].BPerr)) + beta * mlp->Layers[i].Neurons[j].oldBayesDelta;
mlp->Layers[i].Neurons[j].bayes += mlp->Layers[i].Neurons[j].oldBayesDelta;
}
// apply the corrections to the weights of layer i-1
for (int j = 0; j < mlp->numNeurons(i - 1); j++) {
for (int k = 0; k < mlp->numCon(i - 1, j); k++) {
mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta = (-eps * (mlp->Layers[i - 1].Neurons[j].OutArcs[k].target->BPerr) * mlp->Layers[i - 1].Neurons[j].output) + beta * mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
mlp->Layers[i - 1].Neurons[j].OutArcs[k].weight += mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta;
mlp->Layers[i - 1].Neurons[j].absDeltaSum += abs(mlp->Layers[i - 1].Neurons[j].OutArcs[k].oldDelta);
}
}
}
mlp->resetVectorsProfiler(false, true);
// build the influence vectors of the error backpropagated from each output
for (int i = mlp->nLayers - 2; i >= 0; i--) {
for (int j = 0; j < mlp->numNeurons(i); j++) {
for (int k = 0; k < mlp->numCon(i, j); k++) {
mlp->getTarget(i, j)->influenceOutput[k] += abs((mlp->getTarget(i, j)->OutArcs[k].oldDelta) / (mlp->getTarget(i, j)->absDeltaSum));
}
}
}
/////////////////////////////////// section of the algorithm where the net has its propagation vectors loaded /////////////////////////
hpd->trainSupportNet(Wi, We, g);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Gerr += Err;
mlp->resetPotential(); // clear the net's output potentials
mlp->resetAbsSumPotenzial(); // clear the absolute-value sums of the connection contributions to each neuron's output
t1 = mlp->PerformanceCounter();
elapsedMilliseconds = ((t1 - t0) * 1000.0) / mlp->PerformanceFrequency();
executionTime += elapsedMilliseconds;
}
///////////////////////////////////// arc pruning procedure ////////////////////////////////////////////////////
if (t > cutStart) {
if (t == cutStart + 1) nWt0 = mlp->nArc; // re-baseline the weight count when pruning starts
WdelP = 1.0f - (mlp->nArc / (float)nWt0);
WdelP = WdelP * 100.0f;
// stop pruning when the error grew too much or enough weights have been dropped
if (mlp->NetErrPercent > SmaxErr || WdelP >= maxWloss) {
if (mlp->NetErrPercent > SmaxErr) cout << "error reached: " << mlp->NetErrPercent << endl;
if (WdelP >= maxWloss) cout << "weights lossed: " << WdelP << "%" << endl;
return;
}
hpd->cutMadArc(1, alfa);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Gerr /= 2;
Perr /= mlp->examples.size()*Out.size();
executionTime /= mlp->examples.size();
inputTime /= mlp->examples.size();
mlp->NetPerformance = inputTime;
mlp->NetErrPercent = Perr;
cout << "Iterazione " << t << " errore quadratico: " << Gerr << " errore percentuale medio: " << Perr << " %err" << endl
<< "exemple time: " << executionTime << " ms inputTime: " << inputTime << " ms" << endl
<< "Net integrity:" << (mlp->nArc / (float)nWt0) * 100 << "% arcs: " << mlp->nArc << endl;
//if (t > 100 && t < 200)Sleep(500);
// early stop once the mean percentage error drops under the target
if (Perr < ErrPercent) {
cout << "Percentuale di errore obbiettivo raggiunta!" << endl;
return;
}
}
}
};
////////////////////////////////////////////////////////////////CUDA Kernels//////////////////////////////////////////////////////////
// Zeroes a single scalar living in device memory.
__global__ void CUDAresetVar(float *val) {
	val[0] = 0.0f;
}
// One thread per arc: gradient step on every weight,
// weights[i] += -eps * BPerr[ArcIn[i]] * NeuronOut[ArcOut[i]].
// ArcIn / ArcOut hold, per arc, the indices of the two neurons it connects
// inside the flat BPerr / NeuronOut arrays. nArcs bounds the valid range.
__global__ void CUDAapplyWeightCorrections(float eps, float *NeuronOut, float *BPerr, float *weights, int *ArcIn, int *ArcOut, int nArcs) {
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= nArcs) return;
	weights[idx] -= eps * BPerr[ArcIn[idx]] * NeuronOut[ArcOut[idx]];
}
// Gradient step on the biases of the neurons in the inclusive range [startN, endN]:
// Bayes[i] += -eps * BPerr[i].
__global__ void CUDAapplyBayesCorrections(float eps, float *BPerr, float *Bayes, int startN, int endN) {
	unsigned int n = startN + blockIdx.x * blockDim.x + threadIdx.x;
	if (n > endN) return;
	Bayes[n] -= eps * BPerr[n];
}
//applica alla sommatoria degli errori pesati e retropropagati ad ogni neurone la derivata puntuale della sigmoide
//DEPRECATED!!!
/*__global__ void CUDAapplayDsigToBPerr(float *NeuronOut, float *BPerr, int nNeuron) {
unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < nNeuron) {
BPerr[i] *= NeuronOut[i] * (1 - NeuronOut[i]);
}
}*/
// Backpropagates the error across the arcs in the inclusive range [startA, endA]:
// each arc adds BPerr[ArcIn[a]] * weights[a] into BPerr[ArcOut[a]]. atomicAdd is
// required because several arcs can accumulate into the same neuron.
__global__ void CUDAPropagationErr(float *BPerr, float *weights, float *NeuronOut, int *ArcIn, int *ArcOut, int startA, int endA) {
	unsigned int a = startA + blockIdx.x * blockDim.x + threadIdx.x;
	if (a > endA) return;
	atomicAdd(&BPerr[ArcOut[a]], BPerr[ArcIn[a]] * weights[a]);
}
// Multiplies the accumulated error of each neuron in the inclusive range
// [startN, endN] by the logistic-sigmoid derivative out * (1 - out).
__global__ void CUDAoutDiff(float *BPerr, float *NeuronOut, int startN, int endN) {
	int n = startN + blockIdx.x * blockDim.x + threadIdx.x;
	if (n > endN) return;
	BPerr[n] *= NeuronOut[n] * (1 - NeuronOut[n]);
}
// Computes the error of the output-layer neurons and accumulates the mean
// percentage error of the whole output vector into *MeanErr.
//   i - index into NeuronOut / BPerr (the output layer starts at OutputRef)
//   e - index into the flattened examples vector (desired outputs follow the inputN inputs)
//   m - index into the per-output min/max mapping vectors
// numNeurons is the TOTAL neuron count, so `i < numNeurons` bounds the output layer.
// NOTE(review): assumes examples[e] != 0 when computing the percentage error — confirm upstream.
__global__ void CUDAoutputErr(float *NeuronOut, int OutputRef, int numNeurons, int inputN, float *BPerr, float *examples, int exampleRef, float *mapMaxOut, float *mapMinOut, float *MeanErr) {
    unsigned int i = (OutputRef) + (blockIdx.x * blockDim.x) + threadIdx.x;
    unsigned int e = (exampleRef + inputN) + (blockIdx.x * blockDim.x) + threadIdx.x;
    unsigned int m = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < numNeurons) {
        float delta = mapMaxOut[m] - mapMinOut[m];
        // Delta-rule error term for a sigmoid output unit; the desired value is
        // normalized into [0,1] with the per-output min/max map.
        BPerr[i] = (NeuronOut[i] - ((examples[e] - mapMinOut[m]) / delta)) * NeuronOut[i] * (1 - NeuronOut[i]);
        // Accumulate the absolute percentage error. fabsf is the explicit
        // single-precision absolute value; plain abs() risks resolving to the
        // integer overload and silently truncating.
        atomicAdd(MeanErr, fabsf((((NeuronOut[i] * delta) + mapMinOut[m]) - examples[e]) / examples[e]) * 100.0f);
    }
}
// Zero-fills the first `size` elements of a device vector, one thread per element.
__global__ void CUDAresetVector(float *vect, int size) {
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= size) return;
    vect[idx] = 0.0f;
}
// Loads one training example's input values onto the input-layer neurons.
// exampleRef is the offset of the example inside the flattened examples vector.
__global__ void CUDAsetInput(float *NeuronOut, int inputN, int exampleRef, float *example) {
    const unsigned int n = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (n < inputN) {
        NeuronOut[n] = example[exampleRef + n];
    }
}
// Loads a standalone input vector (inputN values) onto the input-layer neurons.
__global__ void CUDAsetSingleInput(float *NeuronOut, int inputN, float *example) {
    const unsigned int n = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (n < inputN) {
        NeuronOut[n] = example[n];
    }
}
// Applies the logistic sigmoid 1 / (1 + e^-x) to the activation of every
// neuron in the inclusive index range [start, end].
__global__ void CUDAsigLayer(float *NeuronOut, int start, int end) {
    const unsigned int n = start + (blockIdx.x * blockDim.x) + threadIdx.x;
    if (n > end) return;
    NeuronOut[n] = 1.0f / (1.0f + expf(-NeuronOut[n]));
}
// Adds the bias ("Bayes") contribution to the potential of every neuron in
// the inclusive index range [start, end].
__global__ void CUDAbayesInput(float *NeuronOut, float *Bayes, int start, int end) {
    const unsigned int n = start + (blockIdx.x * blockDim.x) + threadIdx.x;
    if (n > end) return;
    NeuronOut[n] = NeuronOut[n] + Bayes[n];
}
// Forward-propagates one layer of arcs: each arc adds (tail neuron output *
// arc weight) into the potential of its head neuron. Multiple arcs can target
// the same head neuron, hence the atomic accumulation.
// TODO: replace the per-arc atomicAdd with a proper parallel reduction
// (as few atomic operations per neuron as possible).
__global__ void CUDAlayerInput(float *weights, int *ArcIn, int *ArcOut, float *NeuronOut, int start, int end) {
    const unsigned int arc = start + (blockIdx.x * blockDim.x) + threadIdx.x;
    if (arc > end) return;
    atomicAdd(&NeuronOut[ArcIn[arc]], weights[arc] * NeuronOut[ArcOut[arc]]);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class CUDAcore {
//api di interfacciamento alla GPU
private:
public:
//TODO aggiungere il vettore dei bayes e relativa funzione di applicazione e correzione
cudaDeviceProp prop; //Device specs struct
int GpuID = 0;
//Device-side pointers to the persistent GPU copies of the network parameters
//(allocated by hostCUDAuploadNetParams, released on its error path).
struct devNetParams {
float *weights = 0; // arc weights
int *ArcIn = 0; // head (target) neuron index of each arc
int *ArcOut = 0; // tail (source) neuron index of each arc
float *examples = 0; // flattened training set
float *NeuronOut = 0; // neuron activations / outputs
float *Bayes = 0; // neuron biases
float *BPerr = 0; // backpropagated errors
float *mapMaxOut = 0; // per-output maximum used for normalization
float *mapMinOut = 0; // per-output minimum used for normalization
int *NeurInLyr = 0; // index of the last neuron of each layer
int *priority = 0; // index of the last arc of each layer (thread sync points)
float *MeanErr = 0; // accumulated mean percentage error
float *InputRT = 0; // staging buffer for a single real-time input vector
}gpuNetParams;
vector<float> weights; //pei della rete
vector<int> ArcIn; //target dell'n-esimo arco
vector<int> ArcOut; //base dell'n-esimo arco
vector<float> NeuronOut; //vettore contenente l'output dei neuroni
vector<float> Bayes; //vettore contenente i bayes dei neuroni
vector<float> BPerr; // vettore contenete gli errori retropropagati
vector<float> mapMaxOut; //vettore contenente il massimo valore degli output
vector<float> mapMinOut; //vettore contenente il minimo valore degli output
vector<int> priority; // vettore contenente i punti di sincronizazione dei thread
vector<int> NeurInLyr; //vettore contenente gli indici dell'ultimo neurone di ogni layer
vector<float>examples; //vettore degli esempi
float MeanErr = 0; //veriabile contenente l'errore medio percentuale della rete
int inputN, outputN; //passo di esecuzione elementi del vettore esempi
//Binds this CUDAcore to the GPU with index nGpu and caches its device properties.
CUDAcore(int nGpu) {
GpuID = nGpu;
checkCuda(cudaGetDeviceProperties(&prop, nGpu)); // fill prop with the capabilities of the selected GPU
}
/*per convertire gli oggetti vector in Array
std::vector<double> v;
double* a = &v[0];
*/
//Flattens the MLP object into the linear host vectors (weights, arc indices,
//biases, per-layer bounds, output min/max map) that are later uploaded to the GPU.
//NOTE(review): assumes pt->map has one entry per output neuron — confirm against MLP.
void cudaNetCopyMLP(MLP *pt) {
cout << "copying the net into CUDAcore.." << endl;
weights.resize(pt->nArc);
ArcIn.resize(pt->nArc);
ArcOut.resize(pt->nArc);
NeuronOut.resize(pt->nNeurons);
Bayes.resize(pt->nNeurons);
BPerr.resize(pt->nNeurons);
priority.resize(pt->nLayers + 1);
NeurInLyr.resize(pt->nLayers + 1);
mapMaxOut.resize(pt->map.size());
mapMinOut.resize(pt->map.size());
inputN = pt->numNeurons(0);
outputN = pt->numNeurons(pt->nLayers - 1);
int NeuronIdx = 0;
int ArcIdx = 0;
vector<int> neurons(pt->nLayers);
//load the output-mapping vectors (per-output min/max used for normalization)
for (int i = 0; i < pt->map.size(); i++) {
mapMaxOut[i] = pt->map[i].maxValue;
mapMinOut[i] = pt->map[i].minValue;
}
NeurInLyr[0] = -1; // sentinel: index of the last neuron "before" the first layer
priority[0] = -1; // sentinel: index of the last arc "before" the first layer
//flatten the network parameters layer by layer, neuron by neuron
for (int i = 0; i < pt->nLayers; i++) {
for (int j = 0; j < pt->numNeurons(i); j++) {
Bayes[NeuronIdx] = pt->getTarget(i, j)->bayes;
for (int k = 0; k < pt->numCon(i, j); k++) {
weights[ArcIdx] = pt->getTarget(i, j)->OutArcs[k].weight;
ArcIn[ArcIdx] = pt->getTarget(i, j)->OutArcs[k].target->neurIdx;
ArcOut[ArcIdx] = pt->getTarget(i, j)->neurIdx;
ArcIdx++;
}
NeuronIdx++;
}
NeurInLyr[i + 1] = NeuronIdx - 1; // index of the last neuron of the current layer
priority[i + 1] = ArcIdx - 1; // index of the last arc of the current layer
}
}
//Writes the (possibly trained) flat weight and bias vectors back into the MLP
//object, walking it in the same layer/neuron/arc order used by cudaNetCopyMLP.
void cudaNetPasteMLP(MLP *pt) {
    int wIdx = 0;
    int nIdx = 0;
    for (int layer = 0; layer < pt->nLayers; layer++) {
        for (int neuron = 0; neuron < pt->numNeurons(layer); neuron++) {
            pt->getTarget(layer, neuron)->bayes = Bayes[nIdx];
            nIdx++;
            for (int arc = 0; arc < pt->numCon(layer, neuron); arc++) {
                pt->getTarget(layer, neuron)->OutArcs[arc].weight = weights[wIdx];
                wIdx++;
            }
        }
    }
}
//Stub: flattening a Hopfield network into the GPU representation is not implemented yet.
void cudaNetCopyHopfield(Hopfield* pt) {
}
//Flattens the training set into the `examples` vector; each example is stored
//as [inputN input values][outputN desired output values], back to back.
void cudaNetCopyExamples(MLP *pt) {
cout << "copying example into CUDAcore" << endl;
examples.resize(pt->examples.size()*(pt->numNeurons(0) + pt->numNeurons(pt->nLayers - 1)));
int idx = 0;
for (int i = 0; i < pt->examples.size(); i++) {
//input values first
for (int j = 0; j < pt->numNeurons(0); j++) {
examples[idx++] = pt->examples[i].input[j];
}
//then the desired outputs
for (int j = 0; j < pt->numNeurons(pt->nLayers - 1); j++) {
examples[idx++] = pt->examples[i].Doutput[j];
}
}
}
////////////////////////////////////////////////////////////////CUDA Kernel functions/////////////////////////////////////////////////
//Allocates device buffers, uploads the flattened network and training set,
//then runs Niter epochs of backpropagation entirely on the GPU.
//  eps      - learning rate
//  Niter    - number of training epochs over the whole example set
//  ThxBlock - threads per block used for every kernel launch
//Trained weights and biases are copied back into the host vectors before
//returning. On any CUDA error execution jumps to the cleanup label; the last
//CUDA status is returned in every case.
cudaError_t hostCUDAtrainingNet(float eps, int Niter, int ThxBlock) {
    cout << "learning is started!" << endl;
    //host variables (raw views into the flat std::vectors)
    float *Cweights = &weights[0];
    int *CArcIn = &ArcIn[0];
    int *CArcOut = &ArcOut[0];
    float *CNeuronOut = &NeuronOut[0];
    float *CBayes = &Bayes[0];
    float *CBPerr = &BPerr[0];
    float *CmapMaxOut = &mapMaxOut[0];
    float *CmapMinOut = &mapMinOut[0];
    float *Cexamples = &examples[0];
    int *CNeurInLyr = &NeurInLyr[0];
    int *Cpriority = &priority[0];
    float *CMeanErr = &MeanErr;
    //device variables
    float *dev_weights = 0;
    int *dev_ArcIn = 0;
    int *dev_ArcOut = 0;
    float *dev_examples = 0;
    float *dev_NeuronOut = 0;
    float *dev_Bayes = 0;
    float *dev_BPerr = 0;
    float *dev_mapMaxOut = 0;
    float *dev_mapMinOut = 0;
    int *dev_NeurInLyr = 0;
    int *dev_priority = 0;
    float *dev_MeanErr = 0;
    //loop bookkeeping — declared before the first goto so that no initialized
    //variable is skipped when jumping to the Error label (ill-formed C++ otherwise)
    int startA = 0;
    int endA = 0;
    int startN = 0;
    int endN = 0;
    int numLayerArcs = 0;
    int numLayerNeur = 0;
    int numOfBlocksA = 0;
    int numOfBlocksN = 0;
    int numOfBlocksOut = floorf(outputN / ThxBlock) + 1;
    int exampleRef = 0;
    int outputRef = NeuronOut.size() - outputN;
    long long t0 = 0, t1 = 0;
    long long t0in = 0, t1in = 0;
    double elapsedMilliseconds = 0;
    double elapsedInMilliseconds = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(GpuID);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    // Allocate GPU buffers for the vectors (arc index buffers hold int, the rest float).
    cudaStatus = cudaMalloc((void**)&dev_weights, weights.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_ArcIn, ArcIn.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_ArcOut, ArcOut.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_NeuronOut, NeuronOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_Bayes, Bayes.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_BPerr, BPerr.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_mapMaxOut, mapMaxOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_mapMinOut, mapMinOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_examples, examples.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_NeurInLyr, NeurInLyr.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_priority, priority.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&dev_MeanErr, sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_weights, Cweights, weights.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_ArcIn, CArcIn, ArcIn.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_ArcOut, CArcOut, ArcOut.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_NeuronOut, CNeuronOut, NeuronOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_Bayes, CBayes, Bayes.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_BPerr, CBPerr, BPerr.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_mapMaxOut, CmapMaxOut, mapMaxOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_mapMinOut, CmapMinOut, mapMinOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_examples, Cexamples, examples.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_NeurInLyr, CNeurInLyr, NeurInLyr.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_priority, Cpriority, priority.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(dev_MeanErr, CMeanErr, sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    //////////////////kernel launches on the GPU////////////////
    for (int it = 0; it < Niter; it++) { //training epochs
        t0 = PerformanceCounter();
        for (int t = 0; t < (examples.size() / (inputN + outputN)); t++) { //examples
            //offset of the current example inside the flat examples vector
            exampleRef = t * (inputN + outputN);
            t0in = PerformanceCounter();
            //clear the neuron activation vector
            numOfBlocksA = (floorf(NeuronOut.size() / ThxBlock) + 1);
            CUDAresetVector <<<numOfBlocksA, ThxBlock>>> (dev_NeuronOut, NeuronOut.size());
            //load the example's input values onto the input-layer neurons
            numOfBlocksA = (floorf(inputN / ThxBlock) + 1);
            CUDAsetInput <<<numOfBlocksA, ThxBlock>>> (dev_NeuronOut, inputN, exampleRef, dev_examples);
            //forward propagation through the network
            startA = 0; // first arc index of the current layer
            endA = 0;   // last arc index of the current layer
            startN = 0; // first neuron index of the current layer
            endN = 0;   // last neuron index of the current layer
            for (int i = 0; i < priority.size() - 1; i++) { //NB: the sigmoid is not applied to the input layer
                startA = priority[i] + 1;
                endA = priority[i + 1];
                if (i < priority.size() - 2) {
                    startN = NeurInLyr[i + 1] + 1;
                    endN = NeurInLyr[i + 2];
                }
                numLayerArcs = endA - startA + 1;
                numLayerNeur = endN - startN + 1;
                numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
                numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
                if (i < priority.size() - 2) {
                    CUDAlayerInput <<<numOfBlocksA, ThxBlock>>> (dev_weights, dev_ArcIn, dev_ArcOut, dev_NeuronOut, startA, endA); //propagate neuron outputs to the next layer(s)
                    CUDAbayesInput <<<numOfBlocksN, ThxBlock>>> (dev_NeuronOut, dev_Bayes, startN, endN); //add the bias contribution to the current layer
                    CUDAsigLayer <<<numOfBlocksN, ThxBlock>>> (dev_NeuronOut, startN, endN); //apply the sigmoid to the activations
                }
            }
            t1in = PerformanceCounter();
            elapsedInMilliseconds += ((t1in - t0in) * 1000.0) / PerformanceFrequency();
            //clear the backpropagated-error vector
            numOfBlocksN = (floorf(BPerr.size() / ThxBlock) + 1);
            CUDAresetVector <<<numOfBlocksN, ThxBlock>>> (dev_BPerr, BPerr.size());
            CUDAresetVar <<<1, 1>>> (dev_MeanErr);
            CUDAoutputErr <<<numOfBlocksOut, ThxBlock>>> (dev_NeuronOut, outputRef, NeuronOut.size(), inputN, dev_BPerr, dev_examples, exampleRef, dev_mapMaxOut, dev_mapMinOut, dev_MeanErr);
            cudaStatus = cudaMemcpy(CMeanErr, dev_MeanErr, sizeof(float), cudaMemcpyDeviceToHost);
            if (cudaCheckStatus(cudaStatus) == true) goto Error;
            ////////////////////////////////////example visualization///////////////////////////////////
            //NOTE(review): 'en' and 'delta' are declared outside this chunk (globals?) — confirm
            if (en == t) {
                cudaStatus = cudaMemcpy(CNeuronOut, dev_NeuronOut, NeuronOut.size() * sizeof(float), cudaMemcpyDeviceToHost);
                if (cudaCheckStatus(cudaStatus) == true) goto Error;
                copy(CNeuronOut, CNeuronOut + NeuronOut.size(), NeuronOut.begin());
                cout << "esempio " << en << endl;
                for (int on = 0; on < outputN; on++) {
                    delta = mapMaxOut[on] - mapMinOut[on];
                    cout << "Y" << on << ": " << (NeuronOut[NeuronOut.size() - outputN + on] * delta) + mapMinOut[on] << " D" << on << ": " << examples[exampleRef + inputN + on] << endl;
                }
                cout << endl;
                en--;
                if (en < 0)en = (examples.size() / (inputN + outputN)) - 1;
            }
            ///////////////////////////////////////////////////////////////////////////////////////////////////
            MeanErr += *CMeanErr / outputN;
            //backpropagate the error layer by layer (input layer excluded)
            for (int i = priority.size() - 2; i > 1; i--) {
                startA = priority[i - 1] + 1;
                endA = priority[i];
                startN = NeurInLyr[i - 1] + 1;
                endN = NeurInLyr[i];
                numLayerArcs = endA - startA + 1;
                numLayerNeur = endN - startN + 1;
                numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
                numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
                CUDAPropagationErr <<<numOfBlocksA, ThxBlock>>> (dev_BPerr, dev_weights, dev_NeuronOut, dev_ArcIn, dev_ArcOut, startA, endA);
                CUDAoutDiff <<<numOfBlocksN, ThxBlock>>> (dev_BPerr, dev_NeuronOut, startN, endN);
                cudaStatus = cudaMemcpy(CBPerr, dev_BPerr, BPerr.size() * sizeof(float), cudaMemcpyDeviceToHost);
                if (cudaCheckStatus(cudaStatus) == true) goto Error;
                copy(CBPerr, CBPerr + BPerr.size(), BPerr.begin());
            }
            //apply the correction to every weight and bias
            startN = NeurInLyr[1] + 1; // bias corrections apply from the first hidden layer onwards
            endN = NeurInLyr[NeurInLyr.size() - 1];
            numLayerNeur = endN - startN + 1;
            numOfBlocksA = floorf(weights.size() / ThxBlock) + 1;
            numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
            CUDAapplyWeightCorrections <<<numOfBlocksA, ThxBlock>>> (eps, dev_NeuronOut, dev_BPerr, dev_weights, dev_ArcIn, dev_ArcOut, weights.size());
            CUDAapplyBayesCorrections <<<numOfBlocksN, ThxBlock>>> (eps, dev_BPerr, dev_Bayes, startN, endN);
        }
        t1 = PerformanceCounter();
        elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency(); // execution time of one full epoch over the dataset
        MeanErr = MeanErr / (examples.size() / (inputN + outputN)); //mean percentage error over the dataset
        elapsedInMilliseconds = elapsedInMilliseconds / (examples.size() / (inputN + outputN));
        cout << "Iterazione: " << it << " " << MeanErr << " %Err " << "execution time:" << elapsedMilliseconds << "ms" << endl;
        cout << "mean InputTime: " << elapsedInMilliseconds << "ms" << endl;
        printNetSpecs();
        MeanErr = 0;
    }
    //copy the trained parameters back into the host vectors
    cudaStatus = cudaMemcpy(Cweights, dev_weights, weights.size() * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    copy(Cweights, Cweights + weights.size(), weights.begin());
    cudaStatus = cudaMemcpy(CBayes, dev_Bayes, Bayes.size() * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    copy(CBayes, CBayes + Bayes.size(), Bayes.begin());
    //error checkpoint (any GPU error jumps here; success also falls through for cleanup)
Error:
    //free every device buffer allocated above
    cudaFree(dev_weights);
    cudaFree(dev_ArcIn);
    cudaFree(dev_ArcOut);
    cudaFree(dev_NeuronOut);
    cudaFree(dev_Bayes);
    cudaFree(dev_examples);
    cudaFree(dev_BPerr);
    cudaFree(dev_mapMaxOut);
    cudaFree(dev_mapMinOut);
    cudaFree(dev_priority);
    cudaFree(dev_NeurInLyr);
    cudaFree(dev_MeanErr);
    //return the GPU status
    return cudaStatus;
}
//Uploads the flattened network parameters into the persistent device buffers
//(gpuNetParams) so the net can be evaluated repeatedly (hostCUDAInputNet)
//without re-copying. On any CUDA error the already-allocated buffers are
//released and the failing status is returned.
cudaError_t hostCUDAuploadNetParams() {
    //host-side raw views into the flat vectors, declared before the first goto
    //so no initialization is skipped when jumping to the Error label
    float *Cweights = &weights[0];
    int *CArcIn = &ArcIn[0];
    int *CArcOut = &ArcOut[0];
    float *CNeuronOut = &NeuronOut[0];
    float *CBayes = &Bayes[0];
    float *CBPerr = &BPerr[0];
    float *CmapMaxOut = &mapMaxOut[0];
    float *CmapMinOut = &mapMinOut[0];
    float *Cexamples = &examples[0];
    int *CNeurInLyr = &NeurInLyr[0];
    int *Cpriority = &priority[0];
    float *CMeanErr = &MeanErr;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(GpuID);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    // Allocate GPU buffers for the vectors (arc index buffers hold int, the rest float).
    cudaStatus = cudaMalloc((void**)&gpuNetParams.weights, weights.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.ArcIn, ArcIn.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.ArcOut, ArcOut.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.NeuronOut, NeuronOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.Bayes, Bayes.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.BPerr, BPerr.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.mapMaxOut, mapMaxOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.mapMinOut, mapMinOut.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.examples, examples.size() * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.NeurInLyr, NeurInLyr.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.priority, priority.size() * sizeof(int));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.InputRT, inputN * sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMalloc((void**)&gpuNetParams.MeanErr, sizeof(float));
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(gpuNetParams.weights, Cweights, weights.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.ArcIn, CArcIn, ArcIn.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.ArcOut, CArcOut, ArcOut.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.NeuronOut, CNeuronOut, NeuronOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.Bayes, CBayes, Bayes.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.BPerr, CBPerr, BPerr.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.mapMaxOut, CmapMaxOut, mapMaxOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.mapMinOut, CmapMinOut, mapMinOut.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.examples, Cexamples, examples.size() * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.NeurInLyr, CNeurInLyr, NeurInLyr.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.priority, Cpriority, priority.size() * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    cudaStatus = cudaMemcpy(gpuNetParams.MeanErr, CMeanErr, sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    if (false) {
    Error:
        //release every buffer allocated above on failure
        cudaFree(gpuNetParams.weights);
        cudaFree(gpuNetParams.ArcIn);
        cudaFree(gpuNetParams.ArcOut);
        cudaFree(gpuNetParams.NeuronOut);
        cudaFree(gpuNetParams.Bayes);
        cudaFree(gpuNetParams.examples);
        cudaFree(gpuNetParams.BPerr);
        cudaFree(gpuNetParams.mapMaxOut);
        cudaFree(gpuNetParams.mapMinOut);
        cudaFree(gpuNetParams.priority);
        cudaFree(gpuNetParams.NeurInLyr);
        cudaFree(gpuNetParams.InputRT);
        cudaFree(gpuNetParams.MeanErr);
        cout << "ERRORE: libero la memoria della gpu. " << endl;
    }
    return cudaStatus;
}
//Downloads the trained weights and biases from the persistent GPU buffers
//(gpuNetParams) back into the host vectors. On a CUDA error, releases the
//persistent buffers and returns the failing status.
cudaError_t hostCUDAdownloadNetParams() {
cout << "downloading net params from gpu.." << endl;
float *Cweights = &weights[0];
float *CBayes = &Bayes[0];
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(GpuID);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = cudaMemcpy(Cweights, gpuNetParams.weights, weights.size() * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
cudaStatus = cudaMemcpy(CBayes, gpuNetParams.Bayes, Bayes.size() * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaCheckStatus(cudaStatus) == true) goto Error;
if (false) {
Error:
//free the persistent device buffers on failure
cudaFree(gpuNetParams.weights);
cudaFree(gpuNetParams.ArcIn);
cudaFree(gpuNetParams.ArcOut);
cudaFree(gpuNetParams.NeuronOut);
cudaFree(gpuNetParams.examples);
cudaFree(gpuNetParams.BPerr);
cudaFree(gpuNetParams.mapMaxOut);
cudaFree(gpuNetParams.mapMinOut);
cudaFree(gpuNetParams.priority);
cudaFree(gpuNetParams.NeurInLyr);
cout << "ERRORE: libero la memoria della gpu. " << endl;
}
return cudaStatus;
}
//Feeds a single input vector through the already-trained network that was
//uploaded with hostCUDAuploadNetParams, then prints the de-normalized outputs.
//  input    - must contain exactly inputN values (caller's responsibility)
//  ThxBlock - threads per block used for every kernel launch
cudaError_t hostCUDAInputNet(float *input, int ThxBlock) {
    //all locals are declared before the first goto so that no initialized
    //variable is skipped when jumping to the Error label (ill-formed C++ otherwise)
    int startA = 0;
    int endA = 0;
    int startN = 0;
    int endN = 0;
    int numLayerArcs = 0;
    int numLayerNeur = 0;
    int numOfBlocksA = 0;
    int numOfBlocksN = 0;
    int outputRef = NeuronOut.size() - outputN;
    long long t0in = 0, t1in = 0;
    double elapsedInMilliseconds = 0;
    float delta = 0;
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(GpuID);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    //////////////////kernel launches on the GPU////////////////
    t0in = PerformanceCounter();
    //clear the neuron activation vector
    numOfBlocksA = (floorf(NeuronOut.size() / ThxBlock) + 1);
    CUDAresetVector <<<numOfBlocksA, ThxBlock>>> (gpuNetParams.NeuronOut, NeuronOut.size());
    //upload the input sample into the real-time staging buffer
    cudaStatus = cudaMemcpy(gpuNetParams.InputRT, input, inputN * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    //load the sample onto the input-layer neurons
    numOfBlocksA = (floorf(inputN / ThxBlock) + 1);
    CUDAsetSingleInput <<<numOfBlocksA, ThxBlock>>> (gpuNetParams.NeuronOut, inputN, gpuNetParams.InputRT);
    //forward propagation, layer by layer (the sigmoid is not applied to the input layer)
    for (int i = 0; i < priority.size() - 1; i++) {
        startA = priority[i] + 1;
        endA = priority[i + 1];
        if (i < priority.size() - 2) {
            startN = NeurInLyr[i + 1] + 1;
            endN = NeurInLyr[i + 2];
        }
        numLayerArcs = endA - startA + 1;
        numLayerNeur = endN - startN + 1;
        numOfBlocksA = floorf(numLayerArcs / ThxBlock) + 1;
        numOfBlocksN = floorf(numLayerNeur / ThxBlock) + 1;
        if (i < priority.size() - 2) {
            CUDAlayerInput <<<numOfBlocksA, ThxBlock>>> (gpuNetParams.weights, gpuNetParams.ArcIn, gpuNetParams.ArcOut, gpuNetParams.NeuronOut, startA, endA); //propagate neuron outputs to the next layer(s)
            CUDAbayesInput <<<numOfBlocksN, ThxBlock>>> (gpuNetParams.NeuronOut, gpuNetParams.Bayes, startN, endN); //add the bias contribution
            CUDAsigLayer <<<numOfBlocksN, ThxBlock>>> (gpuNetParams.NeuronOut, startN, endN); //apply the sigmoid
        }
    }
    //copy the output-layer activations back to host memory
    //TODO(review): reported to error out without filling the vector — find the bug
    cudaStatus = cudaMemcpy(&NeuronOut[0] + outputRef, gpuNetParams.NeuronOut + outputRef, outputN * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaCheckStatus(cudaStatus) == true) goto Error;
    t1in = PerformanceCounter();
    elapsedInMilliseconds = ((t1in - t0in) * 1000.0) / PerformanceFrequency();
    ////////////////////////////////////result printout///////////////////////////////////
    cout << "input time: " << elapsedInMilliseconds << " ms" << endl;
    for (int on = 0; on < outputN; on++) {
        //de-normalize each output with its min/max map before printing
        delta = mapMaxOut[on] - mapMinOut[on];
        cout << "Y" << on << ": " << (NeuronOut[NeuronOut.size() - outputN + on] * delta) + mapMinOut[on] << endl;
    }
    cout << endl;
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    if (false) {
    Error:
        //release the persistent device buffers on failure
        cudaFree(gpuNetParams.weights);
        cudaFree(gpuNetParams.ArcIn);
        cudaFree(gpuNetParams.ArcOut);
        cudaFree(gpuNetParams.NeuronOut);
        cudaFree(gpuNetParams.Bayes);
        cudaFree(gpuNetParams.examples);
        cudaFree(gpuNetParams.BPerr);
        cudaFree(gpuNetParams.mapMaxOut);
        cudaFree(gpuNetParams.mapMinOut);
        cudaFree(gpuNetParams.priority);
        cudaFree(gpuNetParams.NeurInLyr);
        cudaFree(gpuNetParams.InputRT);
        cudaFree(gpuNetParams.MeanErr);
    }
    return cudaStatus;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////CUDA UTILITY//////////////////////////////////////////////////////////////////
//Debug-build helper: prints a readable message and asserts when a CUDA call
//fails; in release builds it passes the status through unchanged.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
//Returns true when cudaStatus reports a failure (printing a diagnostic to
//stderr), false on success. The original fell off the end without returning
//on success (undefined behavior) and always printed a cudaSetDevice-specific
//message even though it guards every CUDA call.
bool cudaCheckStatus(cudaError_t cudaStatus) {
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "CUDA call failed: %s\n", cudaGetErrorString(cudaStatus));
        return true;
    }
    return false;
}
//Prints the main properties of the selected GPU (cached in `prop`) to stdout.
void printDeviceSpecs() {
printf("\nDevice: %s\n", prop.name);
printf("Cores clock: %d MHz\n", (prop.clockRate / 1000));
printf("Memory clock: %d MHz\n", (prop.memoryClockRate / 1000));
printf("Total global memmory %.2f MB\n", (float)(prop.totalGlobalMem / (1024 * 1024)));
printf("Max grid size: x %d, y %d, z %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Max block axis size: x %d, y %d, z %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Warp size: %d\n", prop.warpSize);
printf("Max therads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max therads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
printf("Compute Mode: %d\n", prop.computeMode);
printf("Host mem access: %d\n", prop.canUseHostPointerForRegisteredMem);
printf("Shared mem per multiprocessor %.2f KB\n", (float)(prop.sharedMemPerMultiprocessor / 1024));
printf("Max shared mem per blocco %.2f KB\n", (float)(prop.sharedMemPerBlock / 1024));
}
//Prints the size of the network parameters that get transferred to the GPU
//(model footprint, neuron/arc/example counts with their memory usage).
void printNetSpecs() {
cout << "dimensione del modello: " << sizeOfModel("MB") << " MB" << endl;
cout << "numero totale dei neuroni : " << NeuronOut.size() << "(" << sizeOfVector(NeuronOut, "KB") + sizeOfVector(Bayes, "KB") + sizeOfVector(BPerr, "KB")<< " KB)" << endl;
cout << "numero totale degli archi : " << weights.size() << "(" << sizeOfVector(weights, "MB") + sizeOfVector(ArcIn, "MB") + sizeOfVector(ArcOut, "MB") << " MB)" << endl;
cout << "numero esempi : " << examples.size() / (inputN + outputN) << " (" << sizeOfVector(examples, "MB") << " MB)" << endl;
}
//Returns the total memory footprint of the model's host vectors in the given
//unit ("B", "KB", "MB" or "GB"); prints a message and returns 0 for an
//unknown unit. Uses a 64-bit accumulator (an int can overflow for large
//models) and floating-point division (integer division truncated sub-unit
//sizes to 0, unlike sizeOfVector).
float sizeOfModel(string mesureUnit = "B") {
    long long size = 0;
    long long scale = 0;
    size += sizeof(weights[0])*weights.size(); //weights vector
    size += sizeof(ArcIn[0])*ArcIn.size(); //arc head (target) indices
    size += sizeof(ArcOut[0])*ArcOut.size(); //arc tail (source) indices
    size += sizeof(NeuronOut[0])*NeuronOut.size(); //neuron outputs
    size += sizeof(Bayes[0])*Bayes.size(); //biases
    size += sizeof(BPerr[0])*BPerr.size(); //backpropagated errors
    size += sizeof(priority[0])*priority.size(); //per-layer arc bounds
    size += sizeof(examples[0])*examples.size(); //training examples
    if (mesureUnit == "B") { scale = 1; } //Byte
    else if (mesureUnit == "KB") { scale = 1024; } //Kilobyte
    else if (mesureUnit == "MB") { scale = 1024 * 1024; } // Megabyte
    else if (mesureUnit == "GB") { scale = 1024 * 1024 * 1024; } //Gigabyte
    else { cout << "L'unità di misura non è corretta!!" << endl; return 0.0f; }
    //divide in floating point so sub-unit sizes are not truncated to 0
    return (float)size / (float)scale;
}
// Computes the memory footprint of a single vector (element storage only).
// @param vect       vector to measure (any element type)
// @param mesureUnit one of "B", "KB", "MB", "GB"; any other value prints an
//                   error message and yields 0.0f.
// @return size expressed in the requested unit.
template<typename T, typename A>
float sizeOfVector(std::vector<T, A> const& vect, std::string mesureUnit = "B") {
// 64-bit accumulator: a plain int would overflow for vectors larger than ~2 GB
unsigned long long size = sizeof(vect[0]) * vect.size(); // sizeof is unevaluated, safe on empty vectors
unsigned long long scale = 0;
if (mesureUnit == "B") { scale = 1; } //Byte
else if (mesureUnit == "KB") { scale = 1024; } //Kilobyte
else if (mesureUnit == "MB") { scale = 1024ULL * 1024ULL; } // Megabyte
else if (mesureUnit == "GB") { scale = 1024ULL * 1024ULL * 1024ULL; } //Gigabyte
else { std::cout << "L'unità di misura non è corretta!!" << std::endl; return 0.0f; }
return (float)((double)size / (double)scale);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////ALTRE FUNZIONI////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////WINDOWS HIGH SPEED TIMING//////////////////////////////////////////////////////
BOOL WINAPI QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
BOOL WINAPI QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
// Reads the Windows high-resolution performance counter and returns the raw
// tick count. Divide a tick delta by PerformanceFrequency() to get seconds.
inline long long PerformanceCounter() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceCounter(&li);
return li.QuadPart;
}
// Returns the tick frequency (ticks per second) of the Windows performance
// counter, used to convert PerformanceCounter() deltas into elapsed time.
inline long long PerformanceFrequency() noexcept
{
LARGE_INTEGER li;
::QueryPerformanceFrequency(&li);
return li.QuadPart;
}
/* HOW TO USE:
long long t0 = PerformanceCounter();
//code to bench..
long long t1 = PerformanceCounter();
double elapsedMilliseconds = ((t1 - t0) * 1000.0) / PerformanceFrequency();
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
};
// Placeholder class: the name suggests genetic evolution over populations of
// networks, but no behaviour is implemented yet — it only owns the populations.
class genEvolve {
private:
vector<MLP> mlps; // population of multilayer-perceptron networks
vector<Hopfield> hpds; // population of Hopfield networks
public:
};
/*
EXAMPLE:
CUDAcore gtx760(0); //create connection with gpu
gtx760.printDeviceSpecs(); //print specs of gpu
int layer = 10;
int columns = 50;
int out = 4;
int in = 9;
MLP a("cudaTest"); //create mlp object
a.qubeNet(layer, columns, in, out, true, 0.0001f);
a.setNetMap(800, 0);
a.getDataset("datnc"); //loading dataset from file
a.datasetOffset(200.0f); //adding an offset to the dataset
gtx760.cudaNetCopyMLP(&a); //prepare structure of net to be uploaded to gpu memory
gtx760.cudaNetCopyExamples(&a); //prepare dataset to be uploaded to gpu memory
gtx760.hostCUDAtrainingNet(10.0e-10f, 200, 256); //upload needed structures into gpu and start accelerated training
gtx760.cudaNetPasteMLP(&a); //copy trained model into cpu memory
a.saveNet("datncNet-l(5)-c(60)-FC"); //saving model
a.BP(5, 10.0e-10f, 0, 1); //continue to train net into cpu
a.saveNet("CpuTest5"); //new model saving
*/
|
7cc8281037d6e317af4ac70e1d3b03a175e68ed9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
// Minimal HIP sanity check: prints how many HIP devices are visible.
int main(){
int devCount;
hipGetDeviceCount(&devCount); // NOTE(review): status ignored; on failure devCount stays uninitialized — consider checking
printf("%d\n", devCount);
return 0;
| 7cc8281037d6e317af4ac70e1d3b03a175e68ed9.cu | #include <cuda.h>
#include <stdio.h>
// Minimal CUDA sanity check: prints how many CUDA devices are visible.
int main(){
int devCount;
cudaGetDeviceCount(&devCount); // NOTE(review): status ignored; on failure devCount stays uninitialized — consider checking
printf("%d\n", devCount);
return 0;
}
|
0f7e3a22e2e17751a21a6f751768873820982ff0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
//8 X 8 mask
#define MASK_LEN 8
#define MASK_OFFSET (MASK_LEN / 2)
/*as mask is never changing we can define a constant memory on the device side so that
we do not have to copu again and again and loading from const cache is much much faster that
loading from d-ram.
*/
__constant__ int mask[MASK_LEN * MASK_LEN];
// Direct 2-D convolution of an n x n matrix with the 8x8 mask held in
// constant memory; one thread computes one output element. Mask taps that
// fall outside the matrix are skipped (zero padding).
// Launch: 2-D grid of 2-D blocks covering at least n x n threads.
__global__ void conv_2d(int* Mat, int* res, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// bounds guard: the grid is rounded up with a ceil-divide, so when n is not
// a multiple of the block size the edge blocks contain threads outside the
// matrix; without this guard they would write out of bounds
if (row >= n || col >= n) return;
int start_r = row - MASK_OFFSET;
int start_c = col - MASK_OFFSET;
int temp = 0;
for (int i = 0; i < MASK_LEN; i++)
{
for (int j = 0; j < MASK_LEN; j++)
{
if ((start_r + i >= 0) && (start_r + i < n))
{
if ((start_c + j >= 0) && (start_c + j < n))
{
temp += Mat[(start_r + i) * n + (start_c + j)] * mask[i * MASK_LEN + j];
}
}
}
}
res[row * n + col] = temp;
}
// Initialize
// Fills the n x n matrix `a` (row-major) with pseudo-random values in
// [0, div). Uses rand(), so call srand() first for reproducible contents.
void Array_init(int* a, int n, int div) {
const int total = n * n;
for (int idx = 0; idx < total; idx++) {
a[idx] = rand() % div;
}
}
// CPU reference implementation of the same 8x8 convolution; asserts that
// every element of the GPU result matches. Aborts (assert) on first mismatch.
// @param mat  n x n input matrix (host copy)
// @param mask 8x8 convolution mask (host copy of the constant-memory mask)
// @param res  n x n GPU result to validate
// @param n    matrix dimension
void check_answer(int* mat, int* mask, int* res, int n) {
int temp;
int offset_r; // row of the current mask tap inside the input matrix
int offset_c; // column of the current mask tap inside the input matrix
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
temp = 0;
for (int k = 0; k < MASK_LEN; k++)
{
offset_r = i - MASK_OFFSET + k;
for (int l = 0; l < MASK_LEN; l++)
{
offset_c = j - MASK_OFFSET + l;
// skip taps outside the matrix (zero padding), same as the kernel
if (offset_r >= 0 && offset_r < n)
{
if (offset_c >= 0 && offset_c < n)
{
temp += mat[offset_r * n + offset_c] * mask[k * MASK_LEN + l];
}
}
}
}
assert(res[i * n + j] == temp);
}
}
}
// Host driver: builds a random 4096x4096 matrix and an 8x8 mask, runs the
// GPU convolution, validates it against the CPU reference, then cleans up.
int main() {
// number of elements per side of the result matrix
int n = 1 << 12;
int n_bytes = n * n * sizeof(int);
int m_bytes = MASK_LEN * MASK_LEN * sizeof(int);
//allocate the input matrix
int* h_Mat = new int[n * n];
Array_init(h_Mat, n, 100);
//allocate the mask and initialize it
int* h_mask = new int[MASK_LEN * MASK_LEN];
Array_init(h_mask, MASK_LEN, 10);
//allocate space for the result
int* h_result = new int[n * n];
//allocate space on device memory
int* d_Mat, * d_res;
hipMalloc(&d_Mat, n_bytes);
hipMalloc(&d_res, n_bytes);
hipMemcpy(d_Mat, h_Mat, n_bytes, hipMemcpyHostToDevice);
//special function to copy the mask into constant memory
hipMemcpyToSymbol(mask, h_mask, m_bytes);
int threads = 16;
int grid = (n + threads - 1) / threads; // ceil-divide so the grid covers all n
dim3 block_dim(threads, threads);
dim3 grid_dim(grid, grid);
hipLaunchKernelGGL(( conv_2d) , dim3(grid_dim), dim3(block_dim) , 0, 0, d_Mat, d_res, n);
hipMemcpy(h_result, d_res, n_bytes, hipMemcpyDeviceToHost);
check_answer(h_Mat, h_mask, h_result, n);
// bug fix: these buffers were allocated with new[], so they must be
// released with delete[] — freeing them with free() is undefined behaviour
delete[] h_result;
delete[] h_mask;
delete[] h_Mat;
hipFree(d_Mat);
hipFree(d_res);
printf("COMPLETED SUCCESFULLY\n");
return 0;
} | 0f7e3a22e2e17751a21a6f751768873820982ff0.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
//8 X 8 mask
#define MASK_LEN 8
#define MASK_OFFSET (MASK_LEN / 2)
/*as mask is never changing we can define a constant memory on the device side so that
we do not have to copu again and again and loading from const cache is much much faster that
loading from d-ram.
*/
__constant__ int mask[MASK_LEN * MASK_LEN];
// Direct 2-D convolution of an n x n matrix with the 8x8 mask held in
// constant memory; one thread computes one output element. Mask taps that
// fall outside the matrix are skipped (zero padding).
// Launch: 2-D grid of 2-D blocks covering at least n x n threads.
__global__ void conv_2d(int* Mat, int* res, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// bounds guard: the grid is rounded up with a ceil-divide, so when n is not
// a multiple of the block size the edge blocks contain threads outside the
// matrix; without this guard they would write out of bounds
if (row >= n || col >= n) return;
int start_r = row - MASK_OFFSET;
int start_c = col - MASK_OFFSET;
int temp = 0;
for (int i = 0; i < MASK_LEN; i++)
{
for (int j = 0; j < MASK_LEN; j++)
{
if ((start_r + i >= 0) && (start_r + i < n))
{
if ((start_c + j >= 0) && (start_c + j < n))
{
temp += Mat[(start_r + i) * n + (start_c + j)] * mask[i * MASK_LEN + j];
}
}
}
}
res[row * n + col] = temp;
}
// Initialize
// Fills the n x n matrix `a` (row-major) with pseudo-random values in
// [0, div). Uses rand(), so call srand() first for reproducible contents.
void Array_init(int* a, int n, int div) {
const int total = n * n;
for (int idx = 0; idx < total; idx++) {
a[idx] = rand() % div;
}
}
// CPU reference implementation of the same 8x8 convolution; asserts that
// every element of the GPU result matches. Aborts (assert) on first mismatch.
// @param mat  n x n input matrix (host copy)
// @param mask 8x8 convolution mask (host copy of the constant-memory mask)
// @param res  n x n GPU result to validate
// @param n    matrix dimension
void check_answer(int* mat, int* mask, int* res, int n) {
int temp;
int offset_r; // row of the current mask tap inside the input matrix
int offset_c; // column of the current mask tap inside the input matrix
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
temp = 0;
for (int k = 0; k < MASK_LEN; k++)
{
offset_r = i - MASK_OFFSET + k;
for (int l = 0; l < MASK_LEN; l++)
{
offset_c = j - MASK_OFFSET + l;
// skip taps outside the matrix (zero padding), same as the kernel
if (offset_r >= 0 && offset_r < n)
{
if (offset_c >= 0 && offset_c < n)
{
temp += mat[offset_r * n + offset_c] * mask[k * MASK_LEN + l];
}
}
}
}
assert(res[i * n + j] == temp);
}
}
}
// Host driver: builds a random 4096x4096 matrix and an 8x8 mask, runs the
// GPU convolution, validates it against the CPU reference, then cleans up.
int main() {
// number of elements per side of the result matrix
int n = 1 << 12;
int n_bytes = n * n * sizeof(int);
int m_bytes = MASK_LEN * MASK_LEN * sizeof(int);
//allocate the input matrix
int* h_Mat = new int[n * n];
Array_init(h_Mat, n, 100);
//allocate the mask and initialize it
int* h_mask = new int[MASK_LEN * MASK_LEN];
Array_init(h_mask, MASK_LEN, 10);
//allocate space for the result
int* h_result = new int[n * n];
//allocate space on device memory
int* d_Mat, * d_res;
cudaMalloc(&d_Mat, n_bytes);
cudaMalloc(&d_res, n_bytes);
cudaMemcpy(d_Mat, h_Mat, n_bytes, cudaMemcpyHostToDevice);
//special function to copy the mask into constant memory
cudaMemcpyToSymbol(mask, h_mask, m_bytes);
int threads = 16;
int grid = (n + threads - 1) / threads; // ceil-divide so the grid covers all n
dim3 block_dim(threads, threads);
dim3 grid_dim(grid, grid);
conv_2d <<<grid_dim, block_dim >>> (d_Mat, d_res, n);
cudaMemcpy(h_result, d_res, n_bytes, cudaMemcpyDeviceToHost);
check_answer(h_Mat, h_mask, h_result, n);
// bug fix: these buffers were allocated with new[], so they must be
// released with delete[] — freeing them with free() is undefined behaviour
delete[] h_result;
delete[] h_mask;
delete[] h_Mat;
cudaFree(d_Mat);
cudaFree(d_res);
printf("COMPLETED SUCCESFULLY\n");
return 0;
} |
bc1956b424c30b24b0379251721686e591df3ea1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>
#include <array>
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "cutensor.h"
#define HANDLE_ERROR(x) { const auto err = x;\
if (err == CUTENSOR_STATUS_NOT_SUPPORTED) { return false; }\
if (err != CUTENSOR_STATUS_SUCCESS) {printf("Error: %s in line %d\n", cutensorGetErrorString(err), __LINE__); return false; } }
#define HANDLE_CUDA_ERROR(x) { const auto err = x; if( err != hipSuccess ) { printf("Error: %d in line %d\n", err, __LINE__); exit(-1); } }
template<typename U>
struct CuTensorTypeTraits;
template<>
struct CuTensorTypeTraits<double> {
static const hipDataType cudaType = HIP_R_64F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_64F;
typedef double ScalarType;
};
template<>
struct CuTensorTypeTraits<float> {
static const hipDataType cudaType = HIP_R_32F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F;
typedef float ScalarType;
};
template<>
struct CuTensorTypeTraits<__half> {
static const hipDataType cudaType = HIP_R_16F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F;
typedef float ScalarType;
};
template<typename ComputeType,
typename IntType, int kMaxNumModes_>
struct Einsum
{
static const std::vector<IntType> emptyVec;
Einsum(const std::string &equation,
const std::vector<IntType> &A_shape,
const std::vector<IntType> &B_shape = emptyVec) :
numModesA_(A_shape.size()),
numModesB_(B_shape.size()),
numModesC_(0),
isInitialized_(false)
{
const auto arrow_pos = equation.find("->");
const auto comma_pos = equation.find(",");
const auto dots = equation.find("...");
const bool isBroadcast = (dots != std::string::npos);
const bool isImplicit = (arrow_pos == std::string::npos);
if (isBroadcast) // TODO
{
return;
}
const bool usesB = (comma_pos != std::string::npos);
size_t a_start = 0;
size_t a_end = isImplicit ? ((comma_pos == std::string::npos) ? equation.size() : comma_pos) :
((comma_pos == std::string::npos) ? arrow_pos : comma_pos);
size_t b_start = usesB ? comma_pos + 1 : 0;
size_t b_end = usesB ? (isImplicit ? equation.size() : arrow_pos) : 0;
size_t c_start = isImplicit ? equation.size() : arrow_pos + 2;
size_t c_end = equation.size();
char modeA[kMaxNumModes_ + 2];
uint32_t numModesA = 0;
for (int i = a_start; i < a_end && numModesA < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeA[numModesA++] = equation.at(i);
}
}
char modeB[kMaxNumModes_ + 2];
uint32_t numModesB = 0;
for (int i = b_start; i < b_end && numModesB < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeB[numModesB++] = equation.at(i);
}
}
char modeC[kMaxNumModes_ + 2];
uint32_t numModesC = 0;
for (int i = c_start; i < c_end && numModesC < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeC[numModesC++] = equation.at(i);
}
}
if ((numModesA != numModesA_) || (numModesB != numModesB_))
{
// substring size and shape don't match
return;
}
if (numModesA_ > kMaxNumModes_ || numModesB_ > kMaxNumModes_)
{
// too many modes
return;
}
/**
* Copy all modes from modeA to modeC if they don't appear in modeB
*/
auto copyModesIf = [](const char* modeA, uint32_t numModesA,
const char* modeB, uint32_t numModesB,
char* modeC, uint32_t &numModesC)
{
for (uint32_t i = 0; i < numModesA; i++)
{
auto mode = modeA[i];
bool found = false;
for(uint32_t j=0; j < numModesB; ++j){
if(mode == modeB[j])
{
found = true;
break;
}
}
if (!found) // is non-contracted mode
{
modeC[numModesC++] = mode;
if (numModesC > kMaxNumModes_)
{
// too many modes
return false;
}
}
}
return true;
};
std::array<char, kMaxNumModes_+1> implicitModeC;
char* redirectModeC;
if (isImplicit)
{
// we have to copy all non-contracted modes from A over to C
if (copyModesIf(modeA, numModesA_, modeB, numModesB_, implicitModeC.data(), numModesC_) == false)
{
return;
}
// we have to copy all non-contracted modes from B over to C
if (copyModesIf(modeB, numModesB_, modeA, numModesA_, implicitModeC.data(), numModesC_) == false)
{
return;
}
std::sort(implicitModeC.begin(), std::next(implicitModeC.begin(), numModesC_)); // modes are sorted w.r.t. lexical order
implicitModeC[numModesC_] = '\0';
redirectModeC = implicitModeC.data();
}
else
{
redirectModeC = modeC;
numModesC_ = numModesC;
}
for (uint32_t i = 0; i < numModesA_; i++)
{
modesA_[i] = modeA[numModesA_ - i - 1];
extentA_[i] = A_shape[numModesA_ - i - 1];
}
for (uint32_t i = 0; i < numModesB_; i++)
{
modesB_[i] = modeB[numModesB_ - i - 1];
extentB_[i] = B_shape[numModesB_ - i - 1];
}
for (uint32_t i = 0; i < numModesC_; i++)
{
const auto mode = redirectModeC[numModesC_ - i - 1];
modesC_[i] = mode;
bool found = false;
for (uint32_t j=0; j < numModesA_; ++j)
{
if (modesA_[j] == mode)
{
extentC_[i] = extentA_[j];
found = true;
break;
}
}
for (uint32_t j=0; !found && j < numModesB_; ++j)
{
if (modesB_[j] == mode)
{
extentC_[i] = extentB_[j];
break;
}
}
}
isInitialized_ = true;
}
size_t getWorksize() const { return kWorksize_; }
std::vector<IntType> getOutputShape() const
{
if (!isInitialized_) return {};
std::vector<IntType> extentC(numModesC_);
for (int i=0; i < numModesC_; ++i)
{
extentC[i] = extentC_.at(numModesC_ - i - 1);
}
return extentC;
}
/**
* Computes the einsum call A,B->C
*
* \param[in] A_raw device pointer of A
* \param[in] B_raw device pointer of B
* \param[out] C_raw device pointer of C
* \param[out] wor_raw device pointer to the scratchpad memory
* Dispatch to contraction
*/
bool execute(const cutensorHandle_t *handle,
const void* A_raw,
const void* B_raw,
void* C_raw,
void *work_raw, hipStream_t stream) const
{
if (!isInitialized_) return false;
hipDataType cudaType = CuTensorTypeTraits<ComputeType>::cudaType;
cutensorComputeType_t computeType = CuTensorTypeTraits<ComputeType>::cutensorType;
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descA,
numModesA_,
extentA_.data(),
NULL /* = stride */,
cudaType, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descC,
numModesC_,
extentC_.data(),
NULL /* = stride*/,
cudaType, CUTENSOR_OP_IDENTITY));
uint32_t alignmentRequirementA;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
A_raw, &descA, &alignmentRequirementA));
uint32_t alignmentRequirementC;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
C_raw, &descC, &alignmentRequirementC));
cutensorTensorDescriptor_t descB;
uint32_t alignmentRequirementB;
if (numModesB_ > 0)
{
// dispatch to contraction
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descB,
numModesB_,
extentB_.data(),
NULL /* = stride*/,
cudaType, CUTENSOR_OP_IDENTITY));
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
B_raw, &descB, &alignmentRequirementB));
cutensorContractionDescriptor_t desc;
HANDLE_ERROR(cutensorInitContractionDescriptor(handle, &desc,
&descA, modesA_.data(), alignmentRequirementA,
&descB, modesB_.data(), alignmentRequirementB,
&descC, modesC_.data(), alignmentRequirementC,
&descC, modesC_.data(), alignmentRequirementC,
computeType));
cutensorAlgo_t algo = CUTENSOR_ALGO_DEFAULT;
cutensorContractionFind_t find;
HANDLE_ERROR(cutensorInitContractionFind(
handle, &find,
algo));
cutensorContractionPlan_t plan;
HANDLE_ERROR(cutensorInitContractionPlan(handle,
&plan, &desc, &find, kWorksize_));
typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1;
typename CuTensorTypeTraits<ComputeType>::ScalarType beta = 0;
HANDLE_ERROR(cutensorContraction(handle, &plan,
(void*) &alpha, A_raw, B_raw,
(void*) &beta, C_raw, C_raw,
work_raw, kWorksize_, stream));
}
else
{
// dispatch to reduction
typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1;
typename CuTensorTypeTraits<ComputeType>::ScalarType beta = 0;
HANDLE_ERROR(cutensorReduction(handle,
(const void*)&alpha, A_raw, &descA, modesA_.data(),
(const void*)&beta, A_raw, &descC, modesC_.data(), // beta == 0 => will not be used
C_raw, &descC, modesC_.data(),
CUTENSOR_OP_ADD, computeType, work_raw, kWorksize_, stream));
}
return true;
}
bool isInitialized() const { return isInitialized_; }
private:
static const size_t kWorksize_ = 1024ULL * 1024ULL * 8ULL * 128ULL;
uint32_t numModesA_;
uint32_t numModesB_;
uint32_t numModesC_;
bool isInitialized_;
std::array<int, kMaxNumModes_> modesA_;
std::array<int, kMaxNumModes_> modesB_;
std::array<int, kMaxNumModes_> modesC_;
std::array<int64_t, kMaxNumModes_> extentA_;
std::array<int64_t, kMaxNumModes_> extentB_;
std::array<int64_t, kMaxNumModes_> extentC_;
};
// Allocates device buffers sized for A, B and the einsum output, executes
// the operation described by `subscripts` via the Einsum helper, frees the
// buffers, and reports whether cuTENSOR supported the operation.
// NOTE(review): the buffers are never initialized or read back — this sample
// exercises plan creation/execution only, not numerical results.
void einsum(cutensorHandle_t *handle,
const std::vector<int> &A_shape,
const std::vector<int> &B_shape,
const std::string &subscripts)
{
constexpr int kMaxNumModes_ = 40; // maximal number of modes supported by cuTENSOR
typedef float Compute;
Einsum<Compute, int, kMaxNumModes_> myEinsum(subscripts, A_shape, B_shape);
if (!myEinsum.isInitialized()) {
return; // equation malformed or unsupported (e.g. broadcast): silently skip
}
// element counts for each operand (empty shape => 1, i.e. no B operand)
size_t totalElementsA = 1;
for (const auto e : A_shape) {
totalElementsA *= e;
}
size_t totalElementsB = 1;
for (const auto e : B_shape) {
totalElementsB *= e;
}
auto C_shape = myEinsum.getOutputShape();
size_t totalElementsC = 1;
for (const auto e : C_shape) {
totalElementsC *= e;
}
void* A_raw, *B_raw, *output_raw, *workspace_raw;
HANDLE_CUDA_ERROR(hipMalloc(&A_raw, sizeof(Compute) * totalElementsA));
HANDLE_CUDA_ERROR(hipMalloc(&B_raw, sizeof(Compute) * totalElementsB));
HANDLE_CUDA_ERROR(hipMalloc(&output_raw, sizeof(Compute) * totalElementsC));
HANDLE_CUDA_ERROR(hipMalloc(&workspace_raw, myEinsum.getWorksize()));
auto ret = myEinsum.execute(handle, A_raw, B_raw, output_raw, workspace_raw, 0);
hipFree(A_raw);
hipFree(B_raw);
hipFree(output_raw);
hipFree(workspace_raw);
if (!ret) {
printf("%s: not supported\n", subscripts.c_str());
}else{
printf("%s: succeeded\n", subscripts.c_str());
}
}
// Demonstrates cuTENSOR einsum: contraction, permutation and reduction.
// NOTE(review): HANDLE_ERROR expands to `return false;`, which in int main
// yields exit code 0 even on failure — consider a dedicated main-level check.
int main()
{
cutensorHandle_t handle;
cutensorInit(&handle);
/**********************
* Setup planCache (optional)
**********************/
constexpr int32_t numCachelines = 1024;
size_t sizeCache = numCachelines * sizeof(cutensorPlanCacheline_t);
cutensorPlanCacheline_t* cachelines = (cutensorPlanCacheline_t*) malloc(sizeCache);
HANDLE_ERROR( cutensorHandleAttachPlanCachelines(&handle, cachelines, numCachelines) );
einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk->inkm"); // contraction (explicit)
einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk"); // contraction (implicit)
einsum(&handle, {2, 4, 5}, {}, "nij"); // permutation (implicit)
einsum(&handle, {2, 4, 5}, {}, "nij->ijn"); // permutation (same as previous example, but explicit)
einsum(&handle, {2, 4, 5}, {}, "nij->ji"); // reduction
// Detach cache and free-up resources
HANDLE_ERROR( cutensorHandleDetachPlanCachelines(&handle) );
if (cachelines) free (cachelines);
return 0;
}
| bc1956b424c30b24b0379251721686e591df3ea1.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>
#include <array>
#include <string>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "cutensor.h"
#define HANDLE_ERROR(x) { const auto err = x;\
if (err == CUTENSOR_STATUS_NOT_SUPPORTED) { return false; }\
if (err != CUTENSOR_STATUS_SUCCESS) {printf("Error: %s in line %d\n", cutensorGetErrorString(err), __LINE__); return false; } }
#define HANDLE_CUDA_ERROR(x) { const auto err = x; if( err != cudaSuccess ) { printf("Error: %d in line %d\n", err, __LINE__); exit(-1); } }
template<typename U>
struct CuTensorTypeTraits;
template<>
struct CuTensorTypeTraits<double> {
static const cudaDataType_t cudaType = CUDA_R_64F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_64F;
typedef double ScalarType;
};
template<>
struct CuTensorTypeTraits<float> {
static const cudaDataType_t cudaType = CUDA_R_32F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F;
typedef float ScalarType;
};
template<>
struct CuTensorTypeTraits<__half> {
static const cudaDataType_t cudaType = CUDA_R_16F;
static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F;
typedef float ScalarType;
};
template<typename ComputeType,
typename IntType, int kMaxNumModes_>
struct Einsum
{
static const std::vector<IntType> emptyVec;
Einsum(const std::string &equation,
const std::vector<IntType> &A_shape,
const std::vector<IntType> &B_shape = emptyVec) :
numModesA_(A_shape.size()),
numModesB_(B_shape.size()),
numModesC_(0),
isInitialized_(false)
{
const auto arrow_pos = equation.find("->");
const auto comma_pos = equation.find(",");
const auto dots = equation.find("...");
const bool isBroadcast = (dots != std::string::npos);
const bool isImplicit = (arrow_pos == std::string::npos);
if (isBroadcast) // TODO
{
return;
}
const bool usesB = (comma_pos != std::string::npos);
size_t a_start = 0;
size_t a_end = isImplicit ? ((comma_pos == std::string::npos) ? equation.size() : comma_pos) :
((comma_pos == std::string::npos) ? arrow_pos : comma_pos);
size_t b_start = usesB ? comma_pos + 1 : 0;
size_t b_end = usesB ? (isImplicit ? equation.size() : arrow_pos) : 0;
size_t c_start = isImplicit ? equation.size() : arrow_pos + 2;
size_t c_end = equation.size();
char modeA[kMaxNumModes_ + 2];
uint32_t numModesA = 0;
for (int i = a_start; i < a_end && numModesA < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeA[numModesA++] = equation.at(i);
}
}
char modeB[kMaxNumModes_ + 2];
uint32_t numModesB = 0;
for (int i = b_start; i < b_end && numModesB < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeB[numModesB++] = equation.at(i);
}
}
char modeC[kMaxNumModes_ + 2];
uint32_t numModesC = 0;
for (int i = c_start; i < c_end && numModesC < kMaxNumModes_ + 2; ++i){
if (equation.at(i) != ' ') // skip spaces
{
modeC[numModesC++] = equation.at(i);
}
}
if ((numModesA != numModesA_) || (numModesB != numModesB_))
{
// substring size and shape don't match
return;
}
if (numModesA_ > kMaxNumModes_ || numModesB_ > kMaxNumModes_)
{
// too many modes
return;
}
/**
* Copy all modes from modeA to modeC if they don't appear in modeB
*/
auto copyModesIf = [](const char* modeA, uint32_t numModesA,
const char* modeB, uint32_t numModesB,
char* modeC, uint32_t &numModesC)
{
for (uint32_t i = 0; i < numModesA; i++)
{
auto mode = modeA[i];
bool found = false;
for(uint32_t j=0; j < numModesB; ++j){
if(mode == modeB[j])
{
found = true;
break;
}
}
if (!found) // is non-contracted mode
{
modeC[numModesC++] = mode;
if (numModesC > kMaxNumModes_)
{
// too many modes
return false;
}
}
}
return true;
};
std::array<char, kMaxNumModes_+1> implicitModeC;
char* redirectModeC;
if (isImplicit)
{
// we have to copy all non-contracted modes from A over to C
if (copyModesIf(modeA, numModesA_, modeB, numModesB_, implicitModeC.data(), numModesC_) == false)
{
return;
}
// we have to copy all non-contracted modes from B over to C
if (copyModesIf(modeB, numModesB_, modeA, numModesA_, implicitModeC.data(), numModesC_) == false)
{
return;
}
std::sort(implicitModeC.begin(), std::next(implicitModeC.begin(), numModesC_)); // modes are sorted w.r.t. lexical order
implicitModeC[numModesC_] = '\0';
redirectModeC = implicitModeC.data();
}
else
{
redirectModeC = modeC;
numModesC_ = numModesC;
}
for (uint32_t i = 0; i < numModesA_; i++)
{
modesA_[i] = modeA[numModesA_ - i - 1];
extentA_[i] = A_shape[numModesA_ - i - 1];
}
for (uint32_t i = 0; i < numModesB_; i++)
{
modesB_[i] = modeB[numModesB_ - i - 1];
extentB_[i] = B_shape[numModesB_ - i - 1];
}
for (uint32_t i = 0; i < numModesC_; i++)
{
const auto mode = redirectModeC[numModesC_ - i - 1];
modesC_[i] = mode;
bool found = false;
for (uint32_t j=0; j < numModesA_; ++j)
{
if (modesA_[j] == mode)
{
extentC_[i] = extentA_[j];
found = true;
break;
}
}
for (uint32_t j=0; !found && j < numModesB_; ++j)
{
if (modesB_[j] == mode)
{
extentC_[i] = extentB_[j];
break;
}
}
}
isInitialized_ = true;
}
size_t getWorksize() const { return kWorksize_; }
std::vector<IntType> getOutputShape() const
{
if (!isInitialized_) return {};
std::vector<IntType> extentC(numModesC_);
for (int i=0; i < numModesC_; ++i)
{
extentC[i] = extentC_.at(numModesC_ - i - 1);
}
return extentC;
}
/**
* Computes the einsum call A,B->C
*
* \param[in] A_raw device pointer of A
* \param[in] B_raw device pointer of B
* \param[out] C_raw device pointer of C
* \param[out] wor_raw device pointer to the scratchpad memory
* Dispatch to contraction
*/
bool execute(const cutensorHandle_t *handle,
const void* A_raw,
const void* B_raw,
void* C_raw,
void *work_raw, cudaStream_t stream) const
{
if (!isInitialized_) return false;
cudaDataType_t cudaType = CuTensorTypeTraits<ComputeType>::cudaType;
cutensorComputeType_t computeType = CuTensorTypeTraits<ComputeType>::cutensorType;
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descA,
numModesA_,
extentA_.data(),
NULL /* = stride */,
cudaType, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descC,
numModesC_,
extentC_.data(),
NULL /* = stride*/,
cudaType, CUTENSOR_OP_IDENTITY));
uint32_t alignmentRequirementA;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
A_raw, &descA, &alignmentRequirementA));
uint32_t alignmentRequirementC;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
C_raw, &descC, &alignmentRequirementC));
cutensorTensorDescriptor_t descB;
uint32_t alignmentRequirementB;
if (numModesB_ > 0)
{
// dispatch to contraction
HANDLE_ERROR(cutensorInitTensorDescriptor(handle,
&descB,
numModesB_,
extentB_.data(),
NULL /* = stride*/,
cudaType, CUTENSOR_OP_IDENTITY));
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
B_raw, &descB, &alignmentRequirementB));
cutensorContractionDescriptor_t desc;
HANDLE_ERROR(cutensorInitContractionDescriptor(handle, &desc,
&descA, modesA_.data(), alignmentRequirementA,
&descB, modesB_.data(), alignmentRequirementB,
&descC, modesC_.data(), alignmentRequirementC,
&descC, modesC_.data(), alignmentRequirementC,
computeType));
cutensorAlgo_t algo = CUTENSOR_ALGO_DEFAULT;
cutensorContractionFind_t find;
HANDLE_ERROR(cutensorInitContractionFind(
handle, &find,
algo));
cutensorContractionPlan_t plan;
HANDLE_ERROR(cutensorInitContractionPlan(handle,
&plan, &desc, &find, kWorksize_));
typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1;
typename CuTensorTypeTraits<ComputeType>::ScalarType beta = 0;
HANDLE_ERROR(cutensorContraction(handle, &plan,
(void*) &alpha, A_raw, B_raw,
(void*) &beta, C_raw, C_raw,
work_raw, kWorksize_, stream));
}
else
{
// dispatch to reduction
typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1;
typename CuTensorTypeTraits<ComputeType>::ScalarType beta = 0;
HANDLE_ERROR(cutensorReduction(handle,
(const void*)&alpha, A_raw, &descA, modesA_.data(),
(const void*)&beta, A_raw, &descC, modesC_.data(), // beta == 0 => will not be used
C_raw, &descC, modesC_.data(),
CUTENSOR_OP_ADD, computeType, work_raw, kWorksize_, stream));
}
return true;
}
bool isInitialized() const { return isInitialized_; }
private:
static const size_t kWorksize_ = 1024ULL * 1024ULL * 8ULL * 128ULL;
uint32_t numModesA_;
uint32_t numModesB_;
uint32_t numModesC_;
bool isInitialized_;
std::array<int, kMaxNumModes_> modesA_;
std::array<int, kMaxNumModes_> modesB_;
std::array<int, kMaxNumModes_> modesC_;
std::array<int64_t, kMaxNumModes_> extentA_;
std::array<int64_t, kMaxNumModes_> extentB_;
std::array<int64_t, kMaxNumModes_> extentC_;
};
// Allocates device buffers sized for A, B and the einsum output, executes
// the operation described by `subscripts` via the Einsum helper, frees the
// buffers, and reports whether cuTENSOR supported the operation.
// NOTE(review): the buffers are never initialized or read back — this sample
// exercises plan creation/execution only, not numerical results.
void einsum(cutensorHandle_t *handle,
const std::vector<int> &A_shape,
const std::vector<int> &B_shape,
const std::string &subscripts)
{
constexpr int kMaxNumModes_ = 40; // maximal number of modes supported by cuTENSOR
typedef float Compute;
Einsum<Compute, int, kMaxNumModes_> myEinsum(subscripts, A_shape, B_shape);
if (!myEinsum.isInitialized()) {
return; // equation malformed or unsupported (e.g. broadcast): silently skip
}
// element counts for each operand (empty shape => 1, i.e. no B operand)
size_t totalElementsA = 1;
for (const auto e : A_shape) {
totalElementsA *= e;
}
size_t totalElementsB = 1;
for (const auto e : B_shape) {
totalElementsB *= e;
}
auto C_shape = myEinsum.getOutputShape();
size_t totalElementsC = 1;
for (const auto e : C_shape) {
totalElementsC *= e;
}
void* A_raw, *B_raw, *output_raw, *workspace_raw;
HANDLE_CUDA_ERROR(cudaMalloc(&A_raw, sizeof(Compute) * totalElementsA));
HANDLE_CUDA_ERROR(cudaMalloc(&B_raw, sizeof(Compute) * totalElementsB));
HANDLE_CUDA_ERROR(cudaMalloc(&output_raw, sizeof(Compute) * totalElementsC));
HANDLE_CUDA_ERROR(cudaMalloc(&workspace_raw, myEinsum.getWorksize()));
auto ret = myEinsum.execute(handle, A_raw, B_raw, output_raw, workspace_raw, 0);
cudaFree(A_raw);
cudaFree(B_raw);
cudaFree(output_raw);
cudaFree(workspace_raw);
if (!ret) {
printf("%s: not supported\n", subscripts.c_str());
}else{
printf("%s: succeeded\n", subscripts.c_str());
}
}
// Exercises the Einsum wrapper on a few contraction / permutation / reduction
// expressions, with an optional cuTENSOR plan cache attached.
// Returns 0 on success, 1 if the plan-cache allocation fails.
int main()
{
    cutensorHandle_t handle;
    cutensorInit(&handle);

    /**********************
     * Setup planCache (optional)
     **********************/
    constexpr int32_t numCachelines = 1024;
    size_t sizeCache = numCachelines * sizeof(cutensorPlanCacheline_t);
    cutensorPlanCacheline_t* cachelines = (cutensorPlanCacheline_t*) malloc(sizeCache);
    // Fix: the malloc result was previously handed to cuTENSOR unchecked.
    if (cachelines == NULL) {
        printf("failed to allocate %zu bytes for the plan cache\n", sizeCache);
        return 1;
    }
    HANDLE_ERROR( cutensorHandleAttachPlanCachelines(&handle, cachelines, numCachelines) );

    einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk->inkm"); // contraction (explicit)
    einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk"); // contraction (implicit)
    einsum(&handle, {2, 4, 5}, {}, "nij"); // permutation (implicit)
    einsum(&handle, {2, 4, 5}, {}, "nij->ijn"); // permutation (same as previous example, but explicit)
    einsum(&handle, {2, 4, 5}, {}, "nij->ji"); // reduction

    // Detach cache and free-up resources
    HANDLE_ERROR( cutensorHandleDetachPlanCachelines(&handle) );
    free(cachelines);  // guaranteed non-NULL here
    return 0;
}
|
8f171aa122a8450cb11344ecf8dc1e26be0b4001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _HELLOWORLD_KERNEL_H_
#define _HELLOWORLD_KERNEL_H_
#include <stdio.h>
///////////////////////////////////////////////////////////
// Descriptor-matching kernel (the "Hello World" name is stale):
// compares 64-bit index descriptors against query descriptors by
// Hamming distance, using di_static_table as a per-byte popcount
// lookup, and records match indices below `threshold` into `result`.
///////////////////////////////////////////////////////////
__global__ void CudaTest_kernel(int max_sim_num, int threshold, unsigned long* di_index, unsigned long* di_query, unsigned long* result, char* di_static_table, unsigned int* result_num, unsigned int frame_num_index, unsigned int frame_num_query){
// Tiling: blockIdx.x selects a chunk of up to 1024 index descriptors,
// blockIdx.y a chunk of up to 512 query descriptors; each thread owns one
// index descriptor of its tile.
// NOTE(review): the staging below assumes blockDim.x == 1024 (>= both tile
// sizes and >= 256 for the table) -- confirm against the launch site.
const unsigned int thread_idx = threadIdx.x;
const unsigned int block_idx = blockIdx.x;
const unsigned int block_idy = blockIdx.y;
const unsigned int di_index_subset_begin = block_idx*1024;
// Tail blocks get the remainder of the frame counts.
const unsigned int di_index_subset_size = ((block_idx+1)*1024>frame_num_index)?(frame_num_index%1024):1024;
const unsigned int di_query_subset_begin = block_idy*512;
const unsigned int di_query_subset_size = ((block_idy+1)*512>frame_num_query)?(frame_num_query%512):512;
// Stage both descriptor tiles and the byte -> bit-count table in shared memory.
__shared__ unsigned long di_index_subset[1024];
__shared__ unsigned long di_query_subset[512];
__shared__ char shared_static_table[256];
if(thread_idx<di_index_subset_size)
di_index_subset[thread_idx] = di_index[di_index_subset_begin+thread_idx];
if(thread_idx<di_query_subset_size)
di_query_subset[thread_idx] = di_query[di_query_subset_begin+thread_idx];
if(thread_idx<256)
shared_static_table[thread_idx]= di_static_table[thread_idx];
__syncthreads();
unsigned long xor_result = 0;
unsigned int num_diff_bit = 0;
unsigned int index_local=0;
if(thread_idx < di_index_subset_size)
for(int i = 0 ; i < di_query_subset_size ; i ++){
// Hamming distance: XOR, then sum per-byte popcounts from the table.
// (Assumes unsigned long is 64-bit; the >>32..>>56 terms would vanish on
// a 32-bit-long ABI -- TODO confirm target platform.)
xor_result = di_query_subset[i] xor di_index_subset[thread_idx];
num_diff_bit = shared_static_table[xor_result & 0xff] + shared_static_table[xor_result>>8 & 0xff]
+ shared_static_table[xor_result>>16 & 0xff] + shared_static_table[xor_result>>24 & 0xff]
+ shared_static_table[xor_result>>32 & 0xff] + shared_static_table[xor_result>>40 & 0xff]
+ shared_static_table[xor_result>>48 & 0xff] + shared_static_table[xor_result>>56 & 0xff];
// NOTE(review): signed/unsigned comparison -- a negative `threshold`
// converts to a huge unsigned value and matches everything.
if( num_diff_bit < threshold ){
// Append this index frame to the query's match list, capped at
// max_sim_num entries (result_num itself may overcount past the cap).
index_local = atomicAdd(&result_num[(block_idy*512+i)], 1);
if(index_local>=max_sim_num) break;
result[(block_idy*512+i)*max_sim_num + index_local] = di_index_subset_begin+thread_idx;
}
}
}
#endif // #ifndef _HELLOWORLD_KERNEL_H_
| 8f171aa122a8450cb11344ecf8dc1e26be0b4001.cu | #ifndef _HELLOWORLD_KERNEL_H_
#define _HELLOWORLD_KERNEL_H_
#include <stdio.h>
///////////////////////////////////////////////////////////
// Descriptor-matching kernel (the "Hello World" name is stale):
// compares 64-bit index descriptors against query descriptors by
// Hamming distance, using di_static_table as a per-byte popcount
// lookup, and records match indices below `threshold` into `result`.
///////////////////////////////////////////////////////////
__global__ void CudaTest_kernel(int max_sim_num, int threshold, unsigned long* di_index, unsigned long* di_query, unsigned long* result, char* di_static_table, unsigned int* result_num, unsigned int frame_num_index, unsigned int frame_num_query){
// Tiling: blockIdx.x selects a chunk of up to 1024 index descriptors,
// blockIdx.y a chunk of up to 512 query descriptors; each thread owns one
// index descriptor of its tile.
// NOTE(review): the staging below assumes blockDim.x == 1024 (>= both tile
// sizes and >= 256 for the table) -- confirm against the launch site.
const unsigned int thread_idx = threadIdx.x;
const unsigned int block_idx = blockIdx.x;
const unsigned int block_idy = blockIdx.y;
const unsigned int di_index_subset_begin = block_idx*1024;
// Tail blocks get the remainder of the frame counts.
const unsigned int di_index_subset_size = ((block_idx+1)*1024>frame_num_index)?(frame_num_index%1024):1024;
const unsigned int di_query_subset_begin = block_idy*512;
const unsigned int di_query_subset_size = ((block_idy+1)*512>frame_num_query)?(frame_num_query%512):512;
// Stage both descriptor tiles and the byte -> bit-count table in shared memory.
__shared__ unsigned long di_index_subset[1024];
__shared__ unsigned long di_query_subset[512];
__shared__ char shared_static_table[256];
if(thread_idx<di_index_subset_size)
di_index_subset[thread_idx] = di_index[di_index_subset_begin+thread_idx];
if(thread_idx<di_query_subset_size)
di_query_subset[thread_idx] = di_query[di_query_subset_begin+thread_idx];
if(thread_idx<256)
shared_static_table[thread_idx]= di_static_table[thread_idx];
__syncthreads();
unsigned long xor_result = 0;
unsigned int num_diff_bit = 0;
unsigned int index_local=0;
if(thread_idx < di_index_subset_size)
for(int i = 0 ; i < di_query_subset_size ; i ++){
// Hamming distance: XOR, then sum per-byte popcounts from the table.
// (Assumes unsigned long is 64-bit; the >>32..>>56 terms would vanish on
// a 32-bit-long ABI -- TODO confirm target platform.)
xor_result = di_query_subset[i] xor di_index_subset[thread_idx];
num_diff_bit = shared_static_table[xor_result & 0xff] + shared_static_table[xor_result>>8 & 0xff]
+ shared_static_table[xor_result>>16 & 0xff] + shared_static_table[xor_result>>24 & 0xff]
+ shared_static_table[xor_result>>32 & 0xff] + shared_static_table[xor_result>>40 & 0xff]
+ shared_static_table[xor_result>>48 & 0xff] + shared_static_table[xor_result>>56 & 0xff];
// NOTE(review): signed/unsigned comparison -- a negative `threshold`
// converts to a huge unsigned value and matches everything.
if( num_diff_bit < threshold ){
// Append this index frame to the query's match list, capped at
// max_sim_num entries (result_num itself may overcount past the cap).
index_local = atomicAdd(&result_num[(block_idy*512+i)], 1);
if(index_local>=max_sim_num) break;
result[(block_idy*512+i)*max_sim_num + index_local] = di_index_subset_begin+thread_idx;
}
}
}
#endif // #ifndef _HELLOWORLD_KERNEL_H_
|
3b7428b4ee8e9c4c2eea53108e73e2d0cad6b771.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// 3x3 weighted blur: each thread computes one output pixel as the
// filter-weighted sum of its in-bounds neighborhood, divided by the total
// filter mass (out-of-image taps still count toward the divisor, matching
// the original normalization).
__global__ void blur_kernel(int *image_d, float *filter_d, int *blurimage_d, int N1, int N2) {
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    const int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= N1 || col >= N2)
        return;

    // Sum of all nine filter weights.
    float weight_total = 0;
    for (int fr = 0; fr < 3; fr++) {
        for (int fc = 0; fc < 3; fc++) {
            weight_total += *(filter_d + fr * 3 + fc);
        }
    }

    // Weighted sum over the 3x3 neighborhood, skipping out-of-image taps.
    float acc = 0;
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            const int r = row + dr;
            const int c = col + dc;
            if (r >= 0 && c >= 0 && r < N1 && c < N2) {
                acc += ((*(image_d + r * N2 + c)) * (*(filter_d + (dr + 1) * 3 + (dc + 1))));
            }
        }
    }
    blurimage_d[row * N2 + col] = (int)(acc / weight_total);
}
// Generates a random N1 x N2 image, blurs it on the GPU with a fixed 3x3
// filter, and prints the image before and after (the blurred result reuses
// the host image buffer).
int main() {
    int N1,N2;
    N1 = 100;
    N2 = 100;
    int *image_h = (int*)malloc(N1*N2*sizeof(int));
    // Fix: host allocations were previously used unchecked.
    if (image_h == NULL) return 1;
    int i,j;
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            *(image_h + i*N2 + j) = rand()%256;
        }
    }
    float f[3][3] = {{1.0,2.0,1.0},{2.0,3.0,2.0},{1.0,2.0,1.0}};
    float *filter_h = (float*)malloc(3*3*sizeof(float));
    if (filter_h == NULL) { free(image_h); return 1; }
    for(i=0;i<3;i++) {
        for(j=0;j<3;j++) {
            *(filter_h + i*3 + j) = f[i][j];
        }
    }
    printf("Original Image:\n");
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            printf("%d ",*(image_h + i*N2 + j));
        }
        printf("\n");
    }
    printf("\n");
    int *image_d,*blurimage_d;
    float *filter_d;
    hipMalloc((void**)&image_d,N1*N2*sizeof(int));
    hipMalloc((void**)&filter_d,3*3*sizeof(float));
    hipMalloc((void**)&blurimage_d,N1*N2*sizeof(int));
    hipMemcpy(image_d,image_h,N1*N2*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(filter_d,filter_h,3*3*sizeof(float),hipMemcpyHostToDevice);
    dim3 block(10,10);
    // Fix: the grid was hard-coded to 10x10, silently tied to N1=N2=100.
    // Ceiling division covers any image size; the kernel guards the overhang.
    dim3 grid((N1 + block.x - 1)/block.x,(N2 + block.y - 1)/block.y);
    hipLaunchKernelGGL(( blur_kernel), dim3(grid),dim3(block), 0, 0, image_d,filter_d,blurimage_d,N1,N2);
    hipMemcpy(image_h,blurimage_d,N1*N2*sizeof(int),hipMemcpyDeviceToHost);
    printf("Blurred Image:\n");
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            printf("%d ",*(image_h + i*N2 + j));
        }
        printf("\n");
    }
    printf("\n");
    free(image_h);
    free(filter_h);
    hipFree(image_d);
    hipFree(filter_d);
    hipFree(blurimage_d);
}
| 3b7428b4ee8e9c4c2eea53108e73e2d0cad6b771.cu | #include <stdio.h>
#include <stdlib.h>
// 3x3 weighted blur: each thread computes one output pixel as the
// filter-weighted sum of its in-bounds neighborhood, divided by the total
// filter mass (out-of-image taps still count toward the divisor, matching
// the original normalization).
__global__ void blur_kernel(int *image_d, float *filter_d, int *blurimage_d, int N1, int N2) {
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    const int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= N1 || col >= N2)
        return;

    // Sum of all nine filter weights.
    float weight_total = 0;
    for (int fr = 0; fr < 3; fr++) {
        for (int fc = 0; fc < 3; fc++) {
            weight_total += *(filter_d + fr * 3 + fc);
        }
    }

    // Weighted sum over the 3x3 neighborhood, skipping out-of-image taps.
    float acc = 0;
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            const int r = row + dr;
            const int c = col + dc;
            if (r >= 0 && c >= 0 && r < N1 && c < N2) {
                acc += ((*(image_d + r * N2 + c)) * (*(filter_d + (dr + 1) * 3 + (dc + 1))));
            }
        }
    }
    blurimage_d[row * N2 + col] = (int)(acc / weight_total);
}
// Generates a random N1 x N2 image, blurs it on the GPU with a fixed 3x3
// filter, and prints the image before and after (the blurred result reuses
// the host image buffer).
int main() {
    int N1,N2;
    N1 = 100;
    N2 = 100;
    int *image_h = (int*)malloc(N1*N2*sizeof(int));
    // Fix: host allocations were previously used unchecked.
    if (image_h == NULL) return 1;
    int i,j;
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            *(image_h + i*N2 + j) = rand()%256;
        }
    }
    float f[3][3] = {{1.0,2.0,1.0},{2.0,3.0,2.0},{1.0,2.0,1.0}};
    float *filter_h = (float*)malloc(3*3*sizeof(float));
    if (filter_h == NULL) { free(image_h); return 1; }
    for(i=0;i<3;i++) {
        for(j=0;j<3;j++) {
            *(filter_h + i*3 + j) = f[i][j];
        }
    }
    printf("Original Image:\n");
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            printf("%d ",*(image_h + i*N2 + j));
        }
        printf("\n");
    }
    printf("\n");
    int *image_d,*blurimage_d;
    float *filter_d;
    cudaMalloc((void**)&image_d,N1*N2*sizeof(int));
    cudaMalloc((void**)&filter_d,3*3*sizeof(float));
    cudaMalloc((void**)&blurimage_d,N1*N2*sizeof(int));
    cudaMemcpy(image_d,image_h,N1*N2*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(filter_d,filter_h,3*3*sizeof(float),cudaMemcpyHostToDevice);
    dim3 block(10,10);
    // Fix: the grid was hard-coded to 10x10, silently tied to N1=N2=100.
    // Ceiling division covers any image size; the kernel guards the overhang.
    dim3 grid((N1 + block.x - 1)/block.x,(N2 + block.y - 1)/block.y);
    blur_kernel<<<grid,block>>>(image_d,filter_d,blurimage_d,N1,N2);
    cudaMemcpy(image_h,blurimage_d,N1*N2*sizeof(int),cudaMemcpyDeviceToHost);
    printf("Blurred Image:\n");
    for(i=0;i<N1;i++) {
        for(j=0;j<N2;j++) {
            printf("%d ",*(image_h + i*N2 + j));
        }
        printf("\n");
    }
    printf("\n");
    free(image_h);
    free(filter_h);
    cudaFree(image_d);
    cudaFree(filter_d);
    cudaFree(blurimage_d);
}
|
da4442d71587bf21cb82d933265ee4f6138de51b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* My heavily modified version of the NVIDIA transpose code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil.h>
#include "/opt/nvidia/cuda/common/inc/cutil.h"
// includes, kernels
#include <transpose_kernel.cu>
// Thread block size
//#define BLOCK_SIZE 4
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* refdata, float* idata,
const unsigned int size_a, const unsigned int size_b,
const unsigned int size_c, const unsigned int size_d );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// The whole benchmark lives in runTest; CUT_EXIT follows the old cutil
// convention for program exit.
runTest( argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Benchmarks a size^4 float tensor transpose on the GPU against a CPU
// reference (computeGold), reports average timings, and verifies equality.
void
runTest( int argc, char** argv)
{
int temp;
//printf("argc = %d\n",argc);
//printf("argv[1] = %s\n",argv[1]);
// Optional argv[1] gives the per-dimension size; default is 4.
if (argc > 1){
temp = atoi(argv[1]);
printf("size = %d\n",temp);
} else {
temp = 4;
}
// number of runs to average timing over
int numIterations = 10;
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_a = 4;
const unsigned int size_b = 4;
const unsigned int size_c = 4;
const unsigned int size_d = 4;
#else
unsigned int size = temp;
const unsigned int size_a = size;
const unsigned int size_b = size;
const unsigned int size_c = size;
const unsigned int size_d = size;
#endif
// size of memory required to store the matrix
// NOTE(review): stored in 32 bits -- overflows once size^4 floats exceed
// 4 GiB (size >= ~182); confirm the intended size range.
const unsigned int mem_size = sizeof(float) * size_a * size_b * size_c * size_d;
unsigned int timer;
cutCreateTimer(&timer);
CUT_DEVICE_INIT(argc, argv);
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initialize the memory: each element encodes its 4-D index
// (i + 10j + 100k + 1000l) so misplacements are obvious in the dump below
unsigned int num=0;
for( unsigned int i = 0; i < (size_a); ++i){
for( unsigned int j = 0; j < (size_b); ++j){
for( unsigned int k = 0; k < (size_c); ++k){
for( unsigned int l = 0; l < (size_d); ++l){
h_idata[num++] = (float) (i + j*10 + k*100 + l*1000);
}
}
}
}
// to zero the device memory
// NOTE(review): mem_size is already in bytes, so this calloc allocates
// 4x more than needed (harmless but wasteful).
float* h_zero = (float*) calloc(mem_size,sizeof(float));
// allocate device memory
float* d_idata;
float* d_odata;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device (zero output array)
CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size,
hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_odata, h_zero, mem_size,
hipMemcpyHostToDevice) );
printf("Transposing a %d by %d by %d by %d matrix of floats...\n", size_a, size_b, size_c, size_d);
// setup execution parameters
// The grid tiles only the (c, d) dimensions; the kernel in
// transpose_kernel.cu (not visible here) presumably covers a/b itself --
// verify against the kernel.
dim3 dimGrid((int)ceil((double)size_c/(double)BLOCK_SIZE),(int)ceil((double)size_d/(double)BLOCK_SIZE),1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
// warmup so we don't time CUDA startup
hipLaunchKernelGGL(( transpose), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_odata, d_idata, size_a, size_b, size_c, size_d);
// execute the kernel
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i){
hipLaunchKernelGGL(( transpose), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_odata, d_idata, size_a, size_b, size_c, size_d);
}
// launches are asynchronous; synchronize so the timer sees full execution
hipDeviceSynchronize();
cutStopTimer(timer);
float gpuTime = cutGetTimerValue(timer);
printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations);
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size,
hipMemcpyDeviceToHost) );
// compute refdata solution
float* refdata = (float*) malloc( mem_size);
// execute the CPU reference, timed the same way
cutResetTimer(timer);
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i){
computeGold( refdata, h_idata, size_a, size_b, size_c, size_d);
}
cutStopTimer(timer);
float cpuTime = cutGetTimerValue(timer);
printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations);
// check result
// For small problems, dump every mismatching element with its 4-D index.
if((size_a * size_b * size_c * size_d)<100000){
unsigned int offset;
printf("==================================================================\n");
printf(" Initial Reference OutputData\n");
printf("==================================================================\n");
for( unsigned int i = 0; i < (size_a); ++i){
for( unsigned int j = 0; j < (size_b); ++j){
for( unsigned int k = 0; k < (size_c); ++k){
for( unsigned int l = 0; l < (size_d); ++l){
offset = i+size_b*(j+size_c*(k+size_d*l));
if (h_odata[offset] != refdata[offset]){
printf("%3d %3d %3d %3d", i, j, k, l);
printf("%14.7f",h_idata[offset]);
printf("%14.7f",refdata[offset]);
printf("%14.7f",h_odata[offset]);
//printf("%7d",(int)(h_odata[offset] == refdata[offset]));
printf("\n");
}
}
}
}
}
}
// exact float comparison is fine: a transpose only moves values
CUTBoolean res = cutComparef( refdata, h_odata, size_a * size_b * size_c * size_d);
printf("==================================================================\n");
printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED");
printf("==================================================================\n");
printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations);
printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations);
printf("Averaged over %d runs\n", numIterations);
// cleanup memory
free(h_idata);
free(h_odata);
free( refdata);
CUDA_SAFE_CALL(hipFree(d_idata));
CUDA_SAFE_CALL(hipFree(d_odata));
CUT_SAFE_CALL(cutDeleteTimer(timer));
}
| da4442d71587bf21cb82d933265ee4f6138de51b.cu | /*
* My heavily modified version of the NVIDIA transpose code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil.h>
#include "/opt/nvidia/cuda/common/inc/cutil.h"
// includes, kernels
#include <transpose_kernel.cu>
// Thread block size
//#define BLOCK_SIZE 4
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C" void computeGold( float* refdata, float* idata,
const unsigned int size_a, const unsigned int size_b,
const unsigned int size_c, const unsigned int size_d );
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// The whole benchmark lives in runTest; CUT_EXIT follows the old cutil
// convention for program exit.
runTest( argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Benchmarks a size^4 float tensor transpose on the GPU against a CPU
// reference (computeGold), reports average timings, and verifies equality.
void
runTest( int argc, char** argv)
{
int temp;
//printf("argc = %d\n",argc);
//printf("argv[1] = %s\n",argv[1]);
// Optional argv[1] gives the per-dimension size; default is 4.
if (argc > 1){
temp = atoi(argv[1]);
printf("size = %d\n",temp);
} else {
temp = 4;
}
// number of runs to average timing over
int numIterations = 10;
// size of the matrix
#ifdef __DEVICE_EMULATION__
const unsigned int size_a = 4;
const unsigned int size_b = 4;
const unsigned int size_c = 4;
const unsigned int size_d = 4;
#else
unsigned int size = temp;
const unsigned int size_a = size;
const unsigned int size_b = size;
const unsigned int size_c = size;
const unsigned int size_d = size;
#endif
// size of memory required to store the matrix
// NOTE(review): stored in 32 bits -- overflows once size^4 floats exceed
// 4 GiB (size >= ~182); confirm the intended size range.
const unsigned int mem_size = sizeof(float) * size_a * size_b * size_c * size_d;
unsigned int timer;
cutCreateTimer(&timer);
CUT_DEVICE_INIT(argc, argv);
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initialize the memory: each element encodes its 4-D index
// (i + 10j + 100k + 1000l) so misplacements are obvious in the dump below
unsigned int num=0;
for( unsigned int i = 0; i < (size_a); ++i){
for( unsigned int j = 0; j < (size_b); ++j){
for( unsigned int k = 0; k < (size_c); ++k){
for( unsigned int l = 0; l < (size_d); ++l){
h_idata[num++] = (float) (i + j*10 + k*100 + l*1000);
}
}
}
}
// to zero the device memory
// NOTE(review): mem_size is already in bytes, so this calloc allocates
// 4x more than needed (harmless but wasteful).
float* h_zero = (float*) calloc(mem_size,sizeof(float));
// allocate device memory
float* d_idata;
float* d_odata;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device (zero output array)
CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size,
cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_zero, mem_size,
cudaMemcpyHostToDevice) );
printf("Transposing a %d by %d by %d by %d matrix of floats...\n", size_a, size_b, size_c, size_d);
// setup execution parameters
// The grid tiles only the (c, d) dimensions; the kernel in
// transpose_kernel.cu (not visible here) presumably covers a/b itself --
// verify against the kernel.
dim3 dimGrid((int)ceil((double)size_c/(double)BLOCK_SIZE),(int)ceil((double)size_d/(double)BLOCK_SIZE),1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
// warmup so we don't time CUDA startup
transpose<<< dimGrid, dimBlock >>>(d_odata, d_idata, size_a, size_b, size_c, size_d);
// execute the kernel
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i){
transpose<<< dimGrid, dimBlock >>>(d_odata, d_idata, size_a, size_b, size_c, size_d);
}
// launches are asynchronous; synchronize so the timer sees full execution
// (cudaThreadSynchronize is the deprecated alias of cudaDeviceSynchronize)
cudaThreadSynchronize();
cutStopTimer(timer);
float gpuTime = cutGetTimerValue(timer);
printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations);
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
float* h_odata = (float*) malloc(mem_size);
CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size,
cudaMemcpyDeviceToHost) );
// compute refdata solution
float* refdata = (float*) malloc( mem_size);
// execute the CPU reference, timed the same way
cutResetTimer(timer);
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i){
computeGold( refdata, h_idata, size_a, size_b, size_c, size_d);
}
cutStopTimer(timer);
float cpuTime = cutGetTimerValue(timer);
printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations);
// check result
// For small problems, dump every mismatching element with its 4-D index.
if((size_a * size_b * size_c * size_d)<100000){
unsigned int offset;
printf("==================================================================\n");
printf(" Initial Reference OutputData\n");
printf("==================================================================\n");
for( unsigned int i = 0; i < (size_a); ++i){
for( unsigned int j = 0; j < (size_b); ++j){
for( unsigned int k = 0; k < (size_c); ++k){
for( unsigned int l = 0; l < (size_d); ++l){
offset = i+size_b*(j+size_c*(k+size_d*l));
if (h_odata[offset] != refdata[offset]){
printf("%3d %3d %3d %3d", i, j, k, l);
printf("%14.7f",h_idata[offset]);
printf("%14.7f",refdata[offset]);
printf("%14.7f",h_odata[offset]);
//printf("%7d",(int)(h_odata[offset] == refdata[offset]));
printf("\n");
}
}
}
}
}
}
// exact float comparison is fine: a transpose only moves values
CUTBoolean res = cutComparef( refdata, h_odata, size_a * size_b * size_c * size_d);
printf("==================================================================\n");
printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED");
printf("==================================================================\n");
printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations);
printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations);
printf("Averaged over %d runs\n", numIterations);
// cleanup memory
free(h_idata);
free(h_odata);
free( refdata);
CUDA_SAFE_CALL(cudaFree(d_idata));
CUDA_SAFE_CALL(cudaFree(d_odata));
CUT_SAFE_CALL(cutDeleteTimer(timer));
}
|
f65a82437a259828b211e4eac95111df025bd5e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include <memory>
#include "dali/kernels/audio/mel_scale/mel_filter_bank_gpu.h"
#include "dali/core/tensor_shape_print.h"
namespace dali {
namespace kernels {
namespace audio {
const int kBlockDim2 = 32;
const int kBlockDim1 = 1024;
// Per-CUDA-block work descriptor; the union mirrors the two kernel flavors
// below (frequency axis outermost vs. innermost).
template <typename T>
struct BlockDesc {
const T *in_frame;
T *out_frame;
union {
struct { // outer-dim fft
int64_t start_window;
int64_t frame_nwindows;
};
struct { // inner-dim fft
int64_t block_start;
int64_t out_frame_size;
};
};
};
// Computes one mel-band output as the triangular-filter weighted sum over
// FFT bins: rising slope on [interval_ends[mel_bin], interval_ends[mel_bin+1])
// with weight (1 - weights_down[bin]), falling slope on
// [interval_ends[mel_bin+1], interval_ends[mel_bin+2]) with weight
// weights_down[bin]. norm_factor rescales the weights (1 when
// normalization is disabled).
template <typename T>
__device__ T calcMel(const T* in_frame, int mel_bin,
const T *weights_down, const int *interval_ends,
int fft_stride, int fft_shift,
T norm_factor) {
T out = 0;
int fftbin = interval_ends[mel_bin];
int fftbin_end = interval_ends[mel_bin + 1];
// fft_stride is the distance between consecutive FFT bins in memory and
// fft_shift selects the window, so both data layouts can share this code.
const T *in = in_frame + fftbin * fft_stride + fft_shift;
for (; fftbin < fftbin_end; ++fftbin, in += fft_stride) {
auto weight_up = T(1) - weights_down[fftbin];
weight_up *= norm_factor;
out += *in * weight_up;
}
fftbin_end = interval_ends[mel_bin + 2];
in = in_frame + fftbin * fft_stride + fft_shift;
for (; fftbin < fftbin_end; ++fftbin, in += fft_stride) {
auto weight_down = weights_down[fftbin];
weight_down *= norm_factor;
out += *in * weight_down;
}
return out;
}
// For layouts where the frequency is not the innermost dimension, data is flattened into
// 3 dimensions - frame, frequency, time
// Every frame is treated as independent two-dimensional sample
template <typename T>
__global__ void MelFilterBankKernel(const BlockDesc<T> *block_desc,
const T *weights_down, const int *interval_ends,
bool normalize, const T *norm_factors,
int mel_bins) {
// blockIdx.x selects the work descriptor (frame + window range),
// blockIdx.y * kBlockDim2 + threadIdx.y the mel bin, and threadIdx.x the
// window within the descriptor's range.
auto block_id = blockIdx.x;
const T *in_frame = block_desc[block_id].in_frame;
T *out_frame = block_desc[block_id].out_frame;
int mel_bin = blockIdx.y * kBlockDim2 + threadIdx.y;
if (mel_bin >= mel_bins)
return;
int64_t window = block_desc[block_id].start_window + threadIdx.x;
int64_t nwindows = block_desc[block_id].frame_nwindows;
if (window >= nwindows)
return;
// Per-frame layout is [bins, windows]: FFT bins are strided by nwindows
// and the window index is the offset within a bin row.
T *out = out_frame + mel_bin * nwindows + window;
T norm_factor = (normalize) ? norm_factors[mel_bin] : 1;
*out = calcMel(in_frame, mel_bin,
weights_down, interval_ends,
nwindows, window, norm_factor);
}
// For layouts with the innermost frequency dimension, data is flattened
// to two dimensions - time, frequency
template <typename T>
__global__ void MelFilterBankKernelInnerFft(const BlockDesc<T> *block_desc,
const T *weights_down, const int *interval_ends,
bool normalize, const T *norm_factors,
int mel_bins, int64_t fftdim) {
// blockIdx.x selects the work descriptor; each thread produces one element
// of the sample's flattened (window, mel_bin) output range.
auto block_id = blockIdx.x;
auto idx = block_desc[block_id].block_start + threadIdx.x;
if (idx >= block_desc[block_id].out_frame_size)
return;
// Output layout is [windows, mel_bins]; each window's spectrum is
// contiguous, hence fft_stride = 1 and fft_shift = 0 in calcMel below.
auto window = idx / mel_bins;
auto mel_bin = idx % mel_bins;
const T *in = block_desc[block_id].in_frame;
T *out = block_desc[block_id].out_frame;
T norm_factor = (normalize) ? norm_factors[mel_bin] : 1;
*(out + idx) = calcMel(in + window * fftdim, mel_bin,
weights_down, interval_ends, 1, 0, norm_factor);
}
// GPU implementation: precomputes the triangular-filter bin boundaries on the
// host, builds per-CUDA-block work descriptors, and launches one of the two
// kernels above depending on whether the frequency axis is innermost.
template <typename T>
class MelFilterBankGpu<T>::Impl : public MelFilterImplBase<T> {
public:
// Fills interval_ends_: entry m is the FFT bin where filter m's rising
// slope starts, entry m+1 where it peaks, with two extra entries bounding
// the whole covered bin range.
template <typename MelScale>
Impl(MelScale mel_scale, const MelFilterBankArgs &args) :
MelFilterImplBase<T>(mel_scale, args),
interval_ends_(args.nfilter + 2) {
double mel = mel_low_ + mel_delta_;
interval_ends_[0] = fftbin_start_;
interval_ends_[args.nfilter + 1] = fftbin_end_ + 1;
for (int interval = 1; interval < args_.nfilter + 1; interval++, mel += mel_delta_) {
double freq = mel_scale.mel_to_hz(mel);
interval_ends_[interval] = ::ceil(freq / hz_step_);
}
}
// Registers scratch needs (lookup tables + block descriptors) and picks the
// inner-FFT path when every dimension after the frequency axis has volume 1.
void Setup(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
se.add<int>(AllocType::GPU, interval_ends_.size());
se.add<T>(AllocType::GPU, weights_down_.size());
if (args_.normalize)
se.add<T>(AllocType::GPU, norm_factors_.size());
inner_fft_ = true;
for (int s = 0; s < in_shape.size(); s++) {
inner_fft_ &= volume(in_shape.tensor_shape_span(s).begin() + args_.axis + 1,
in_shape.tensor_shape_span(s).end()) == 1;
}
fft_dim_ = in_shape.tensor_shape_span(0)[args_.axis];
if (inner_fft_) {
SetupBlockDescsInnerFft(se, in_shape);
} else {
SetupBlockDescsOuterFft(se, in_shape);
}
}
// Uploads the tables and descriptors to GPU scratch memory and launches the
// kernel matching the layout chosen in Setup, on `stream`.
void Compute(const T* const* in_list, T **out_list, int ndim,
Scratchpad *scratchpad, hipStream_t stream) {
if (inner_fft_) {
FillBlockDescsInnerFft(in_list, out_list);
} else {
FillBlockDescsOuterFft(in_list, out_list);
}
auto block_descs = scratchpad->ToGPU(stream, block_descs_);
auto interval_ends = scratchpad->ToGPU(stream, interval_ends_);
auto weights_down = scratchpad->ToGPU(stream, weights_down_);
T *norm_factors = nullptr;
if (args_.normalize)
norm_factors = scratchpad->ToGPU(stream, norm_factors_);
if (inner_fft_) {
hipLaunchKernelGGL(( MelFilterBankKernelInnerFft)
, dim3(block_descs_.size()), dim3(kBlockDim1), 0, stream,
block_descs, weights_down, interval_ends, args_.normalize,
norm_factors, args_.nfilter, fft_dim_);
} else {
dim3 block(kBlockDim2, ::min(args_.nfilter, kBlockDim2));
dim3 grid(block_descs_.size(), div_ceil(args_.nfilter, kBlockDim2));
hipLaunchKernelGGL(( MelFilterBankKernel)
, dim3(grid), dim3(block), 0, stream, block_descs, weights_down, interval_ends,
args_.normalize, norm_factors, args_.nfilter);
}
CUDA_CALL(hipGetLastError());
}
using MelFilterImplBase<T>::Args;
private:
// Outer-FFT layout: one descriptor per kBlockDim2-wide strip of windows,
// per frame, per sample.
void SetupBlockDescsOuterFft(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
nframes_.clear();
nwindows_.clear();
block_descs_.clear();
auto batch_size = in_shape.num_samples();
for (int64_t ti = 0; ti < batch_size; ++ti) {
const auto &tshape = in_shape.tensor_shape(ti);
nframes_.push_back(volume(tshape.begin(), tshape.begin() + args_.axis));
nwindows_.push_back(volume(tshape.begin() + args_.axis + 1, tshape.end()));
for (int64_t s = 0; s < nframes_.back(); ++s) {
auto nblocks = div_ceil(nwindows_.back(), kBlockDim2);
for (int64_t b = 0; b < nblocks; ++b) {
block_descs_.push_back(BlockDesc<T>{nullptr, nullptr,
{b * kBlockDim2, nwindows_.back()}});
}
}
}
se.add<BlockDesc<T>>(AllocType::GPU, block_descs_.size());
}
// Inner-FFT layout: one descriptor per kBlockDim1-sized chunk of each
// sample's flattened (windows x mel bins) output.
void SetupBlockDescsInnerFft(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
nframes_.clear();
nwindows_.clear();
block_descs_.clear();
auto batch_size = in_shape.num_samples();
for (int64_t ti = 0; ti < batch_size; ++ti) {
const auto &tshape = in_shape.tensor_shape(ti);
auto sample_size = volume(tshape.begin(), tshape.begin() + args_.axis) * args_.nfilter;
auto nblocks = div_ceil(sample_size, kBlockDim1);
for (int b = 0; b < nblocks; ++b) {
block_descs_.push_back(BlockDesc<T>{nullptr, nullptr, {b * kBlockDim1, sample_size}});
}
}
se.add<BlockDesc<T>>(AllocType::GPU, block_descs_.size());
}
// Patches the in/out pointers (unknown at Setup time) into the descriptors,
// walking them in the same order SetupBlockDescsOuterFft created them.
void FillBlockDescsOuterFft(const T* const* in_list, T **out_list) {
int64_t block_id = 0;
for (uint64_t ti = 0; ti < nframes_.size(); ++ti) {
const T *in = in_list[ti];
T *out = out_list[ti];
for (int64_t s = 0; s < nframes_[ti]; ++s) {
auto nblocks = div_ceil(nwindows_[ti], kBlockDim2);
for (int b = 0; b < nblocks; ++b) {
block_descs_[block_id].in_frame = in;
block_descs_[block_id].out_frame = out;
++block_id;
}
in += nwindows_[ti] * fft_dim_;
out += nwindows_[ti] * args_.nfilter;
}
}
}
// Same, for the inner-FFT descriptors (one sample spans consecutive blocks).
void FillBlockDescsInnerFft(const T* const* in_list, T **out_list) {
int block_id = 0;
int sample = 0;
while (block_id < static_cast<int>(block_descs_.size())) {
auto sample_size = block_descs_[block_id].out_frame_size;
auto nblocks = div_ceil(sample_size, kBlockDim1);
for (int i = 0; i < nblocks; ++i, ++block_id) {
block_descs_[block_id].in_frame = in_list[sample];
block_descs_[block_id].out_frame = out_list[sample];
}
++sample;
}
}
// Triangular-filter bin boundaries (size nfilter + 2), see the constructor.
std::vector<int> interval_ends_;
std::vector<int64_t> nframes_;
std::vector<int64_t> nwindows_;
std::vector<BlockDesc<T>> block_descs_;
int64_t fft_dim_ = 0;
bool inner_fft_ = false;
USE_MEL_FILTER_IMPL_MEMBERS(T);
};
// Validates the batch, computes the output shape (frequency axis replaced by
// nfilter), (re)creates the Impl when the effective args change, and fills
// req.scratch_sizes from the Impl's scratch estimator.
template <typename T>
KernelRequirements MelFilterBankGpu<T>::Setup(KernelContext &context,
const InListGPU<T> &in,
const MelFilterBankArgs &original_args) {
auto args = original_args;
// Default frequency axis: second-to-last dimension.
args.axis = args.axis >= 0 ? args.axis : in.sample_dim() - 2;
TensorListShape<> out_shape = in.shape;
for (int64_t s = 0; s < out_shape.num_samples(); ++s) {
out_shape.tensor_shape_span(s)[args.axis] = args.nfilter;
}
KernelRequirements req;
req.output_shapes = {out_shape};
auto fftdim = in.shape.tensor_shape_span(0)[args.axis];
for (int s = 1; s < in.shape.num_samples(); ++s) {
DALI_ENFORCE(in.shape.tensor_shape_span(s)[args.axis] == fftdim,
"All samples should have the same FFT dimension");
}
ScratchpadEstimator se;
// Defaults: nfft from the spectrum length (bins = nfft/2 + 1) and the
// upper frequency bound from Nyquist.
args.nfft = args.nfft > 0 ? args.nfft : 2 * (in.shape[0][args.axis] - 1);
args.freq_high = args.freq_high > 0 ? args.freq_high : args.sample_rate / 2;
if (!impl_ || impl_->Args() != args) {
impl_.reset();
switch (args.mel_formula) {
case MelScaleFormula::HTK:
impl_ = std::make_unique<Impl>(HtkMelScale<T>(), args);
break;
case MelScaleFormula::Slaney:
default:
impl_ = std::make_unique<Impl>(SlaneyMelScale<T>(), args);
break;
}
}
impl_->Setup(se, in.shape);
req.scratch_sizes = se.sizes;
return req;
}
// Runs the filter bank; Setup() must have been called first so that impl_
// exists and the scratchpad has been sized.
template <typename T>
void MelFilterBankGpu<T>::Run(KernelContext &context, OutListGPU<T> &out, const InListGPU<T> &in) {
assert(impl_ != nullptr);
impl_->Compute(in.data.data(), out.data.data(), in.sample_dim(), context.scratchpad,
context.gpu.stream);
}
// Defined out-of-line so std::unique_ptr<Impl> can hold the forward-declared
// Impl type in the header.
template <typename T>
MelFilterBankGpu<T>::MelFilterBankGpu() {}
template <typename T>
MelFilterBankGpu<T>::~MelFilterBankGpu() {}
template class MelFilterBankGpu<float>;
template class MelFilterBankGpu<double>;
} // namespace audio
} // namespace kernels
} // namespace dali
| f65a82437a259828b211e4eac95111df025bd5e7.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include <memory>
#include "dali/kernels/audio/mel_scale/mel_filter_bank_gpu.h"
#include "dali/core/tensor_shape_print.h"
namespace dali {
namespace kernels {
namespace audio {
const int kBlockDim2 = 32;
const int kBlockDim1 = 1024;
template <typename T>
struct BlockDesc {
const T *in_frame;
T *out_frame;
union {
struct { // outer-dim fft
int64_t start_window;
int64_t frame_nwindows;
};
struct { // inner-dim fft
int64_t block_start;
int64_t out_frame_size;
};
};
};
// Computes one mel filter output: a triangular-weighted sum of FFT bins.
// interval_ends[mel_bin]..interval_ends[mel_bin+1] is the rising slope of
// the triangle (weight = 1 - weights_down[bin]) and
// interval_ends[mel_bin+1]..interval_ends[mel_bin+2] the falling slope
// (weight = weights_down[bin]); each weight is scaled by norm_factor.
// fft_stride/fft_shift encode the memory layout: (nwindows, window) for the
// outer-fft kernel, (1, 0) for the inner-fft kernel.
template <typename T>
__device__ T calcMel(const T* in_frame, int mel_bin,
                     const T *weights_down, const int *interval_ends,
                     int fft_stride, int fft_shift,
                     T norm_factor) {
  T out = 0;
  int fftbin = interval_ends[mel_bin];
  int fftbin_end = interval_ends[mel_bin + 1];
  const T *in = in_frame + fftbin * fft_stride + fft_shift;
  // Rising slope: "up" weight is the complement of the stored "down" weight.
  for (; fftbin < fftbin_end; ++fftbin, in += fft_stride) {
    auto weight_up = T(1) - weights_down[fftbin];
    weight_up *= norm_factor;
    out += *in * weight_up;
  }
  // Falling slope, continuing from the peak bin.
  fftbin_end = interval_ends[mel_bin + 2];
  in = in_frame + fftbin * fft_stride + fft_shift;
  for (; fftbin < fftbin_end; ++fftbin, in += fft_stride) {
    auto weight_down = weights_down[fftbin];
    weight_down *= norm_factor;
    out += *in * weight_down;
  }
  return out;
}
// For layouts where the frequency is not the innermost dimension, data is flattened into
// 3 dimensions - frame, frequency, time
// Every frame is treated as independent two-dimensional sample
// Outer-fft kernel: blockIdx.x selects a (frame, window-range) work block,
// blockIdx.y * kBlockDim2 + threadIdx.y selects the mel bin, threadIdx.x the
// window within the block. FFT bins are strided by the frame's window count.
template <typename T>
__global__ void MelFilterBankKernel(const BlockDesc<T> *block_desc,
                                    const T *weights_down, const int *interval_ends,
                                    bool normalize, const T *norm_factors,
                                    int mel_bins) {
  auto block_id = blockIdx.x;
  const T *in_frame = block_desc[block_id].in_frame;
  T *out_frame = block_desc[block_id].out_frame;
  int mel_bin = blockIdx.y * kBlockDim2 + threadIdx.y;
  if (mel_bin >= mel_bins)  // grid may overshoot the last mel-bin tile
    return;
  int64_t window = block_desc[block_id].start_window + threadIdx.x;
  int64_t nwindows = block_desc[block_id].frame_nwindows;
  if (window >= nwindows)   // tail guard for the last window tile
    return;
  T *out = out_frame + mel_bin * nwindows + window;
  T norm_factor = (normalize) ? norm_factors[mel_bin] : 1;
  // Stride between consecutive FFT bins is nwindows; window is the offset.
  *out = calcMel(in_frame, mel_bin,
                 weights_down, interval_ends,
                 nwindows, window, norm_factor);
}
// For layouts with the innermost frequency dimension, data is flattened
// to two dimensions - time, frequency
// Inner-fft kernel: output is flattened to (time, mel); one thread per output
// element, decoded as window = idx / mel_bins, mel_bin = idx % mel_bins.
// FFT bins are contiguous (stride 1) in this layout.
template <typename T>
__global__ void MelFilterBankKernelInnerFft(const BlockDesc<T> *block_desc,
                                            const T *weights_down, const int *interval_ends,
                                            bool normalize, const T *norm_factors,
                                            int mel_bins, int64_t fftdim) {
  auto block_id = blockIdx.x;
  auto idx = block_desc[block_id].block_start + threadIdx.x;
  if (idx >= block_desc[block_id].out_frame_size)  // tail guard
    return;
  auto window = idx / mel_bins;
  auto mel_bin = idx % mel_bins;
  const T *in = block_desc[block_id].in_frame;
  T *out = block_desc[block_id].out_frame;
  T norm_factor = (normalize) ? norm_factors[mel_bin] : 1;
  *(out + idx) = calcMel(in + window * fftdim, mel_bin,
                         weights_down, interval_ends, 1, 0, norm_factor);
}
// GPU implementation of the mel filter bank. Precomputes the FFT-bin
// intervals of the triangular filters (interval_ends_) and dispatches to one
// of two kernels depending on whether the FFT axis is innermost in the
// input layout. Protocol: Setup() (sizes scratch + builds block descriptors
// without pointers) must precede Compute() (fills pointers + launches).
template <typename T>
class MelFilterBankGpu<T>::Impl : public MelFilterImplBase<T> {
 public:
  // Converts the evenly spaced mel-domain filter edges back to Hz and then
  // to FFT-bin indices. interval_ends_[0] / interval_ends_[nfilter+1] hold
  // the overall first / one-past-last bins; entries 1..nfilter are the
  // filter peaks (start of each filter's falling slope).
  template <typename MelScale>
  Impl(MelScale mel_scale, const MelFilterBankArgs &args) :
      MelFilterImplBase<T>(mel_scale, args),
      interval_ends_(args.nfilter + 2) {
    double mel = mel_low_ + mel_delta_;
    interval_ends_[0] = fftbin_start_;
    interval_ends_[args.nfilter + 1] = fftbin_end_ + 1;
    for (int interval = 1; interval < args_.nfilter + 1; interval++, mel += mel_delta_) {
      double freq = mel_scale.mel_to_hz(mel);
      interval_ends_[interval] = std::ceil(freq / hz_step_);
    }
  }

  // Reserves GPU scratch for the filter tables and builds the work
  // descriptors. A layout is "inner fft" when no dimension follows the FFT
  // axis in any sample of the batch.
  void Setup(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
    se.add<int>(AllocType::GPU, interval_ends_.size());
    se.add<T>(AllocType::GPU, weights_down_.size());
    if (args_.normalize)
      se.add<T>(AllocType::GPU, norm_factors_.size());
    inner_fft_ = true;
    for (int s = 0; s < in_shape.size(); s++) {
      inner_fft_ &= volume(in_shape.tensor_shape_span(s).begin() + args_.axis + 1,
                           in_shape.tensor_shape_span(s).end()) == 1;
    }
    fft_dim_ = in_shape.tensor_shape_span(0)[args_.axis];
    if (inner_fft_) {
      SetupBlockDescsInnerFft(se, in_shape);
    } else {
      SetupBlockDescsOuterFft(se, in_shape);
    }
  }

  // Fills the per-block I/O pointers, uploads the filter tables and block
  // descriptors to scratch GPU memory and launches the appropriate kernel.
  void Compute(const T* const* in_list, T **out_list, int ndim,
               Scratchpad *scratchpad, cudaStream_t stream) {
    if (inner_fft_) {
      FillBlockDescsInnerFft(in_list, out_list);
    } else {
      FillBlockDescsOuterFft(in_list, out_list);
    }
    auto block_descs = scratchpad->ToGPU(stream, block_descs_);
    auto interval_ends = scratchpad->ToGPU(stream, interval_ends_);
    auto weights_down = scratchpad->ToGPU(stream, weights_down_);
    T *norm_factors = nullptr;
    if (args_.normalize)
      norm_factors = scratchpad->ToGPU(stream, norm_factors_);
    if (inner_fft_) {
      MelFilterBankKernelInnerFft
        <<<block_descs_.size(), kBlockDim1, 0, stream>>>
          (block_descs, weights_down, interval_ends, args_.normalize,
           norm_factors, args_.nfilter, fft_dim_);
    } else {
      // 2D launch: x = windows, y = mel bins (tiled by kBlockDim2).
      dim3 block(kBlockDim2, std::min(args_.nfilter, kBlockDim2));
      dim3 grid(block_descs_.size(), div_ceil(args_.nfilter, kBlockDim2));
      MelFilterBankKernel
        <<<grid, block, 0, stream>>>(block_descs, weights_down, interval_ends,
                                     args_.normalize, norm_factors, args_.nfilter);
    }
    CUDA_CALL(cudaGetLastError());
  }

  using MelFilterImplBase<T>::Args;

 private:
  // One descriptor per (frame, window-tile); pointers are filled later by
  // FillBlockDescsOuterFft.
  void SetupBlockDescsOuterFft(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
    nframes_.clear();
    nwindows_.clear();
    block_descs_.clear();
    auto batch_size = in_shape.num_samples();
    for (int64_t ti = 0; ti < batch_size; ++ti) {
      const auto &tshape = in_shape.tensor_shape(ti);
      nframes_.push_back(volume(tshape.begin(), tshape.begin() + args_.axis));
      nwindows_.push_back(volume(tshape.begin() + args_.axis + 1, tshape.end()));
      for (int64_t s = 0; s < nframes_.back(); ++s) {
        auto nblocks = div_ceil(nwindows_.back(), kBlockDim2);
        for (int64_t b = 0; b < nblocks; ++b) {
          block_descs_.push_back(BlockDesc<T>{nullptr, nullptr,
                                              {b * kBlockDim2, nwindows_.back()}});
        }
      }
    }
    se.add<BlockDesc<T>>(AllocType::GPU, block_descs_.size());
  }

  // One descriptor per kBlockDim1-sized chunk of the flattened output.
  void SetupBlockDescsInnerFft(ScratchpadEstimator &se, const TensorListShape<> &in_shape) {
    nframes_.clear();
    nwindows_.clear();
    block_descs_.clear();
    auto batch_size = in_shape.num_samples();
    for (int64_t ti = 0; ti < batch_size; ++ti) {
      const auto &tshape = in_shape.tensor_shape(ti);
      auto sample_size = volume(tshape.begin(), tshape.begin() + args_.axis) * args_.nfilter;
      auto nblocks = div_ceil(sample_size, kBlockDim1);
      for (int b = 0; b < nblocks; ++b) {
        block_descs_.push_back(BlockDesc<T>{nullptr, nullptr, {b * kBlockDim1, sample_size}});
      }
    }
    se.add<BlockDesc<T>>(AllocType::GPU, block_descs_.size());
  }

  // Assigns per-frame input/output pointers; must mirror the iteration order
  // used by SetupBlockDescsOuterFft.
  void FillBlockDescsOuterFft(const T* const* in_list, T **out_list) {
    int64_t block_id = 0;
    for (uint64_t ti = 0; ti < nframes_.size(); ++ti) {
      const T *in = in_list[ti];
      T *out = out_list[ti];
      for (int64_t s = 0; s < nframes_[ti]; ++s) {
        auto nblocks = div_ceil(nwindows_[ti], kBlockDim2);
        for (int b = 0; b < nblocks; ++b) {
          block_descs_[block_id].in_frame = in;
          block_descs_[block_id].out_frame = out;
          ++block_id;
        }
        in += nwindows_[ti] * fft_dim_;      // advance one frame in the input
        out += nwindows_[ti] * args_.nfilter;  // and in the output
      }
    }
  }

  // Assigns per-sample pointers; recovers the per-sample block count from
  // the out_frame_size stored at setup time.
  void FillBlockDescsInnerFft(const T* const* in_list, T **out_list) {
    int block_id = 0;
    int sample = 0;
    while (block_id < static_cast<int>(block_descs_.size())) {
      auto sample_size = block_descs_[block_id].out_frame_size;
      auto nblocks = div_ceil(sample_size, kBlockDim1);
      for (int i = 0; i < nblocks; ++i, ++block_id) {
        block_descs_[block_id].in_frame = in_list[sample];
        block_descs_[block_id].out_frame = out_list[sample];
      }
      ++sample;
    }
  }

  std::vector<int> interval_ends_;       // nfilter+2 FFT-bin interval boundaries
  std::vector<int64_t> nframes_;         // frames per sample (outer-fft layout)
  std::vector<int64_t> nwindows_;        // windows per sample (outer-fft layout)
  std::vector<BlockDesc<T>> block_descs_;
  int64_t fft_dim_ = 0;                  // extent of the FFT axis
  bool inner_fft_ = false;               // true when the FFT axis is innermost
  USE_MEL_FILTER_IMPL_MEMBERS(T);
};
// Validates the input batch, computes the output shape (the FFT axis is
// replaced by nfilter), fills in defaulted arguments (axis, nfft, freq_high)
// and (re)creates the implementation object when the effective arguments
// differ from the currently cached ones.
template <typename T>
KernelRequirements MelFilterBankGpu<T>::Setup(KernelContext &context,
                                              const InListGPU<T> &in,
                                              const MelFilterBankArgs &original_args) {
  auto args = original_args;
  // Default: apply the filter bank along the second-to-last dimension.
  args.axis = args.axis >= 0 ? args.axis : in.sample_dim() - 2;
  TensorListShape<> out_shape = in.shape;
  for (int64_t s = 0; s < out_shape.num_samples(); ++s) {
    out_shape.tensor_shape_span(s)[args.axis] = args.nfilter;
  }
  KernelRequirements req;
  req.output_shapes = {out_shape};
  // All samples must agree on the FFT extent; sample 0 is the reference.
  auto fftdim = in.shape.tensor_shape_span(0)[args.axis];
  for (int s = 1; s < in.shape.num_samples(); ++s) {
    DALI_ENFORCE(in.shape.tensor_shape_span(s)[args.axis] == fftdim,
                 "All samples should have the same FFT dimension");
  }
  ScratchpadEstimator se;
  // Default nfft assumes the input holds the non-redundant spectrum half.
  args.nfft = args.nfft > 0 ? args.nfft : 2 * (in.shape[0][args.axis] - 1);
  args.freq_high = args.freq_high > 0 ? args.freq_high : args.sample_rate / 2;
  if (!impl_ || impl_->Args() != args) {
    impl_.reset();
    switch (args.mel_formula) {
      case MelScaleFormula::HTK:
        impl_ = std::make_unique<Impl>(HtkMelScale<T>(), args);
        break;
      case MelScaleFormula::Slaney:
      default:
        impl_ = std::make_unique<Impl>(SlaneyMelScale<T>(), args);
        break;
    }
  }
  impl_->Setup(se, in.shape);
  req.scratch_sizes = se.sizes;
  return req;
}
// Runs the filter bank on a batch previously validated by Setup().
// Requires a prior successful Setup() call (impl_ must exist).
template <typename T>
void MelFilterBankGpu<T>::Run(KernelContext &context, OutListGPU<T> &out, const InListGPU<T> &in) {
  assert(impl_ != nullptr);
  impl_->Compute(in.data.data(), out.data.data(), in.sample_dim(), context.scratchpad,
                 context.gpu.stream);
}
// Out-of-line constructor/destructor — presumably required here because the
// Impl type is incomplete where the std::unique_ptr member is declared
// (pimpl idiom); verify against the header.
template <typename T>
MelFilterBankGpu<T>::MelFilterBankGpu() {}
template <typename T>
MelFilterBankGpu<T>::~MelFilterBankGpu() {}
// Explicit instantiations for the supported sample types.
template class MelFilterBankGpu<float>;
template class MelFilterBankGpu<double>;
} // namespace audio
} // namespace kernels
} // namespace dali
|
ad392c693af3ab24c2a7f734b6facf1f6e9cbb3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/tensor/gather_nd_grad_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/atomic/common.cuh"
namespace onnxruntime {
namespace rocm {
// GatherND gradient (scatter-add) kernel: one thread per update element.
// Element i belongs to slice i / slice_size at in-slice position
// i % slice_size; it is accumulated into the output at the slice's recorded
// offset. atomic_add is needed because duplicate gather indices make several
// slices share the same output offset.
template <typename T>
__global__ void _GatherNDGradKernel(
    const size_t num_slices,
    const T* update_data,
    T* output_data,
    const size_t slice_size,
    const int64_t* slice_offsets) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);
  uint64_t slice_offset = slice_offsets[i / slice_size];  // which output slice
  size_t j = i % slice_size;                              // position within it
  atomic_add(output_data + slice_offset + j, update_data[i]);
};
// Host launcher for the GatherND gradient: one thread per update element,
// grid sized by ceil-division over the block's max thread count.
template <typename T>
void GatherNDGradImpl(
    hipStream_t stream,
    const size_t num_slices,
    const void* update_data,
    void* output_data,
    const size_t slice_size,
    const int64_t* input_slice_offsets_data) {
  const auto blocks_per_grid = CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock);
  hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherNDGradKernel<T>), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
                     num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data);
}
#define SPECIALIZED_GRAD_IMPL(T) \
template void GatherNDGradImpl<T>(hipStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)
SPECIALIZED_GRAD_IMPL(float);
SPECIALIZED_GRAD_IMPL(half);
SPECIALIZED_GRAD_IMPL(double);
} // namespace rocm
} // namespace onnxruntime
| ad392c693af3ab24c2a7f734b6facf1f6e9cbb3e.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/tensor/gather_nd_grad_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/atomic/common.cuh"
namespace onnxruntime {
namespace rocm {
// GatherND gradient (scatter-add) kernel: one thread per update element.
// Element i belongs to slice i / slice_size at in-slice position
// i % slice_size. atomic_add is needed because duplicate gather indices make
// several slices share the same output offset.
template <typename T>
__global__ void _GatherNDGradKernel(
    const size_t num_slices,
    const T* update_data,
    T* output_data,
    const size_t slice_size,
    const int64_t* slice_offsets) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);
  uint64_t slice_offset = slice_offsets[i / slice_size];  // which output slice
  size_t j = i % slice_size;                              // position within it
  atomic_add(output_data + slice_offset + j, update_data[i]);
};
// Host launcher for the GatherND gradient: one thread per update element,
// grid sized by ceil-division over the block's max thread count.
template <typename T>
void GatherNDGradImpl(
    hipStream_t stream,
    const size_t num_slices,
    const void* update_data,
    void* output_data,
    const size_t slice_size,
    const int64_t* input_slice_offsets_data) {
  const auto blocks_per_grid = CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock);
  hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherNDGradKernel<T>), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
                     num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data);
}
#define SPECIALIZED_GRAD_IMPL(T) \
template void GatherNDGradImpl<T>(hipStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)
SPECIALIZED_GRAD_IMPL(float);
SPECIALIZED_GRAD_IMPL(half);
SPECIALIZED_GRAD_IMPL(double);
} // namespace rocm
} // namespace onnxruntime
|
2bc16d4902694673d77a7cfb88e787e388d5c2a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "elmult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *inA = NULL;
hipMalloc(&inA, XSIZE*YSIZE);
float *inB = NULL;
hipMalloc(&inB, XSIZE*YSIZE);
int length = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
elmult), dim3(gridBlock),dim3(threadBlock), 0, 0, inA,inB,length);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
elmult), dim3(gridBlock),dim3(threadBlock), 0, 0, inA,inB,length);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
elmult), dim3(gridBlock),dim3(threadBlock), 0, 0, inA,inB,length);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2bc16d4902694673d77a7cfb88e787e388d5c2a8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "elmult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for the elmult kernel: for each matrix
// size and each block configuration, warms up, then times 1000 launches.
// Fixes vs. the original: cudaMalloc sizes now include sizeof(float)
// (the original allocated XSIZE*YSIZE BYTES, i.e. a quarter of the intended
// element count), the command-line argument is validated before use, and the
// device buffers are freed at the end of each configuration.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
    return 1;
  }
  char *p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      float *inA = NULL;
      cudaMalloc(&inA, XSIZE * YSIZE * sizeof(float));  // was XSIZE*YSIZE bytes
      float *inB = NULL;
      cudaMalloc(&inB, XSIZE * YSIZE * sizeof(float));
      int length = 1;
      // Round the problem size up to a multiple of the block dimensions.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force context creation before timing
      elmult<<<gridBlock, threadBlock>>>(inA, inB, length);
      cudaDeviceSynchronize();
      // Warm-up launches (excluded from timing).
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        elmult<<<gridBlock, threadBlock>>>(inA, inB, length);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        elmult<<<gridBlock, threadBlock>>>(inA, inB, length);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
      cudaFree(inA);  // the original leaked both buffers across iterations
      cudaFree(inB);
    }
  }
}
98cc744194a804e6ec35f112cddd69e20cfb838e.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <kNN-brute-force.cuh>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <assert.h>
#include "helper_cuda.h"
#define SHARED_SIZE_LIMIT 1024U
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
// One comparator of the bitonic network: conditionally swaps the
// (distance, index) pairs so that distA <= distB when dir == 1 (ascending)
// and distA >= distB when dir == 0 (descending).
__device__ void cuCompare(float &distA, int &indA, float &distB, int &indB, int dir)
{
    float f;
    int i;
    if ((distA >= distB) == dir)
    {
        // Swap the key (distance) together with its payload (index).
        f = distA;
        distA = distB;
        distB = f;
        i = indA;
        indA = indB;
        indB = i;
    }
}
__constant__ float query_dev[3];
// Computes the squared Euclidean distance between each reference point and
// the query point held in constant memory (only the first 3 coordinates are
// used), and records each point's original index for the key/value sort.
// Grid-stride loop: the original advanced by gridDim.x only, which made
// threads redundantly recompute each other's elements whenever the grid
// (capped at 65000 blocks by the caller) could not cover ref_nb in one pass;
// the correct stride is the total number of launched threads.
__global__ void cuComputeDistanceGlobal( float *ref, int ref_nb , int dim, float *dist, int *ind)
{
    float dx, dy, dz;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < ref_nb)
    {
        dx = ref[index * dim] - query_dev[0];
        dy = ref[index * dim + 1] - query_dev[1];
        dz = ref[index * dim + 2] - query_dev[2];
        dist[index] = (dx * dx) + (dy * dy) + (dz * dz);
        ind[index] = index;
        index += gridDim.x * blockDim.x;
    }
}
// Bitonic sort of one 2*blockDim.x chunk, operating directly on global
// memory; each thread owns two elements. Used when the whole array fits in
// a single block's chunk.
__global__ void cuBitonicSortOneBlock(float *dist, int *ind, int n, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    // Standard bitonic build: doubling subsequence size, halving stride.
    for (int size = 2; size <= blockDim.x * 2; size <<= 1)
    {
        // Alternate direction within each subsequence to keep it bitonic.
        int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
        for (int stride = size / 2; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
}
// First stage of the multi-block sort: fully sorts each 2*blockDim.x chunk,
// then runs one extra merge pass whose direction alternates with the block
// index, so that each pair of adjacent chunks forms a bitonic sequence for
// the subsequent global merge stages.
__global__ void cuBitonicSort(float *dist, int *ind, int n, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    for (int size = 2; size <= blockDim.x * 2; size <<= 1)
    {
        int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
        for (int stride = size / 2; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
    // Final per-chunk pass with per-block alternating direction.
    int ddd = blockIdx.x & 1;
    {
        for (int stride = blockDim.x; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
}
// One global-memory merge step of the bitonic network, for strides too large
// to handle inside a single block's chunk. Each thread performs exactly one
// comparator at distance `stride`.
__global__ void cuBitonicMergeGlobal(float *dist, int *ind, int n, int size, int stride , int dir)
{
    int global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
    int comparatorI = global_comparatorI & (n / 2 - 1);
    // Direction flips per `size`-sized subsequence.
    int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
    int pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
    cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
}
// Converts the first k squared distances to Euclidean distances.
// Launched as <<<k, 1>>>: one single-thread block per element, hence the
// element index is the block index.
__global__ void cuParallelSqrt(float *dist, int k)
{
    unsigned int xIndex = blockIdx.x;
    if (xIndex < k)
    {
        // NOTE(review): sqrtf would make the single-precision intent
        // explicit — confirm no double promotion is desired here.
        dist[xIndex] = sqrt(dist[xIndex]);
    }
}
// Finishes a bitonic merge once the stride fits within one block's
// 2*blockDim.x chunk: runs all remaining (halving) stride steps in a single
// kernel. Despite the name, it operates directly on global memory.
__global__ void cuBitonicMergeShared(float *dist, int *ind, int n, int size, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    int comparatorI = (blockIdx.x * blockDim.x + threadIdx.x) & ((n / 2) - 1);
    int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
    for (int stride = blockDim.x; stride > 0; stride >>= 1)
    {
        __syncthreads();
        int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
        cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
    }
}
// Host-side driver of the bitonic key/value sort (dir = 1 -> ascending).
// n is expected to be a power of two (see factorRadix2 check in the caller).
// Single-block inputs are sorted in one kernel; larger inputs use a
// per-chunk sort followed by global merge steps, switching to the in-chunk
// merge kernel once the stride fits in a block's chunk.
void bitonic_sort(float *dist_dev, int *ind_dev, int n, int dir)
{
    int max_threads_per_block = min(SHARED_SIZE_LIMIT, n);
    int blockCount = n / max_threads_per_block;
    int threadCount = max_threads_per_block / 2;  // each thread owns 2 elements
    blockCount = max(1, blockCount);
    threadCount = min(max_threads_per_block, threadCount);
    if (blockCount == 1)
    {
        hipLaunchKernelGGL(( cuBitonicSortOneBlock) , dim3(blockCount), dim3(threadCount), 0, 0, dist_dev, ind_dev, n, dir);
    }
    else
    {
        hipLaunchKernelGGL(( cuBitonicSort) , dim3(blockCount), dim3(threadCount), 0, 0, dist_dev, ind_dev, n, dir);
        for (int size = 2 * max_threads_per_block; size <= n; size <<= 1)
        {
            for (int stride = size / 2; stride > 0; stride >>= 1)
            {
                if (stride >= max_threads_per_block)
                {
                    hipLaunchKernelGGL(( cuBitonicMergeGlobal) , dim3(blockCount), dim3(threadCount), 0, 0, dist_dev, ind_dev, n, size, stride, dir);
                }
                else
                {
                    // Remaining strides handled in one kernel; break out.
                    hipLaunchKernelGGL(( cuBitonicMergeShared) , dim3(blockCount), dim3(threadCount), 0, 0, dist_dev, ind_dev, n, size, dir);
                    break;
                }
            }
        }
    }
}
// Factors L as (odd part) * 2^(*log2L); returns the odd part and stores the
// exponent of two in *log2L. A return value of 1 means L is a power of two.
// L == 0 yields (0, 0).
// Fixed: the original loop increment was `*log2L++`, which — because postfix
// ++ binds tighter than unary * — advanced the POINTER instead of the value,
// leaving *log2L stuck at 0 and walking the pointer out of bounds.
int factorRadix2(int *log2L, int L)
{
    if (!L)
    {
        *log2L = 0;
        return 0;
    }
    for (*log2L = 0; (L & 1) == 0; L >>= 1)
    {
        ++*log2L;
    }
    return L;
}
// Brute-force k-nearest-neighbours: computes the squared distance from every
// reference point to the query, bitonic-sorts (ascending) distances together
// with their indices, square-roots the k best distances and copies the k
// results back to the host.
// Preconditions: ref_nb should be a power of two (bitonic sort requirement,
// see factorRadix2) and dim is expected to be 3 (see cuComputeDistanceGlobal
// and the 3-element query_dev constant).
void knn_brute_force_bitonic_sort(float *ref_host, int ref_nb, float *query_host, int dim, int k, float *dist_host, int *ind_host)
{
    unsigned int size_of_float = sizeof(float);
    unsigned int size_of_int = sizeof(int);
    float *ref_dev;
    float *dist_dev;
    int *ind_dev;
    int log2L;
    int factorizationRemainder = factorRadix2(&log2L, ref_nb);
    (void)factorizationRemainder;  // == 1 iff ref_nb is a power of two
    // assert(factorizationRemainder == 1);
    checkCudaErrors(hipMalloc( (void **) &dist_dev, ref_nb * size_of_float));
    checkCudaErrors(hipMalloc( (void **) &ind_dev, ref_nb * size_of_int));
    checkCudaErrors(hipMalloc( (void **) &ref_dev, ref_nb * size_of_float * dim));
    checkCudaErrors(hipMemcpy(ref_dev, ref_host, ref_nb * dim * size_of_float, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpyToSymbol(query_dev, query_host, dim * size_of_float));
    int threadCount = min(ref_nb, SHARED_SIZE_LIMIT);
    int blockCount = ref_nb / threadCount;
    blockCount = min(blockCount, 65000);
    hipLaunchKernelGGL(( cuComputeDistanceGlobal) , dim3(blockCount), dim3(threadCount), 0, 0, ref_dev, ref_nb, dim, dist_dev, ind_dev);
    bitonic_sort(dist_dev, ind_dev, ref_nb, 1);
    hipLaunchKernelGGL(( cuParallelSqrt) , dim3(k), dim3(1), 0, 0, dist_dev, k);
    checkCudaErrors(hipMemcpy(dist_host, dist_dev, k * size_of_float, hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(ind_host, ind_dev, k * size_of_int, hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(ref_dev));
    checkCudaErrors(hipFree(dist_dev));  // was leaked in the original
    checkCudaErrors(hipFree(ind_dev));
}
| 98cc744194a804e6ec35f112cddd69e20cfb838e.cu |
// Includes
#include <kNN-brute-force.cuh>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <assert.h>
#include "helper_cuda.h"
#define SHARED_SIZE_LIMIT 1024U
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
// One comparator of the bitonic network: conditionally swaps the
// (distance, index) pairs so that distA <= distB when dir == 1 (ascending)
// and distA >= distB when dir == 0 (descending).
__device__ void cuCompare(float &distA, int &indA, float &distB, int &indB, int dir)
{
    float f;
    int i;
    if ((distA >= distB) == dir)
    {
        // Swap the key (distance) together with its payload (index).
        f = distA;
        distA = distB;
        distB = f;
        i = indA;
        indA = indB;
        indB = i;
    }
}
__constant__ float query_dev[3];
// Computes the squared Euclidean distance between each reference point and
// the query point held in constant memory (only the first 3 coordinates are
// used), and records each point's original index for the key/value sort.
// Grid-stride loop: the original advanced by gridDim.x only, which made
// threads redundantly recompute each other's elements whenever the grid
// (capped at 65000 blocks by the caller) could not cover ref_nb in one pass;
// the correct stride is the total number of launched threads.
__global__ void cuComputeDistanceGlobal( float *ref, int ref_nb , int dim, float *dist, int *ind)
{
    float dx, dy, dz;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < ref_nb)
    {
        dx = ref[index * dim] - query_dev[0];
        dy = ref[index * dim + 1] - query_dev[1];
        dz = ref[index * dim + 2] - query_dev[2];
        dist[index] = (dx * dx) + (dy * dy) + (dz * dz);
        ind[index] = index;
        index += gridDim.x * blockDim.x;
    }
}
// Bitonic sort of one 2*blockDim.x chunk, operating directly on global
// memory; each thread owns two elements. Used when the whole array fits in
// a single block's chunk.
__global__ void cuBitonicSortOneBlock(float *dist, int *ind, int n, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    // Standard bitonic build: doubling subsequence size, halving stride.
    for (int size = 2; size <= blockDim.x * 2; size <<= 1)
    {
        // Alternate direction within each subsequence to keep it bitonic.
        int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
        for (int stride = size / 2; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
}
// First stage of the multi-block sort: fully sorts each 2*blockDim.x chunk,
// then runs one extra merge pass whose direction alternates with the block
// index, so that each pair of adjacent chunks forms a bitonic sequence for
// the subsequent global merge stages.
__global__ void cuBitonicSort(float *dist, int *ind, int n, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    for (int size = 2; size <= blockDim.x * 2; size <<= 1)
    {
        int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
        for (int stride = size / 2; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
    // Final per-chunk pass with per-block alternating direction.
    int ddd = blockIdx.x & 1;
    {
        for (int stride = blockDim.x; stride > 0; stride >>= 1)
        {
            __syncthreads();
            int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
        }
    }
}
// One global-memory merge step of the bitonic network, for strides too large
// to handle inside a single block's chunk. Each thread performs exactly one
// comparator at distance `stride`.
__global__ void cuBitonicMergeGlobal(float *dist, int *ind, int n, int size, int stride , int dir)
{
    int global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
    int comparatorI = global_comparatorI & (n / 2 - 1);
    // Direction flips per `size`-sized subsequence.
    int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
    int pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
    cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
}
// Converts the first k squared distances to Euclidean distances.
// Launched as <<<k, 1>>>: one single-thread block per element, hence the
// element index is the block index.
__global__ void cuParallelSqrt(float *dist, int k)
{
    unsigned int xIndex = blockIdx.x;
    if (xIndex < k)
    {
        // NOTE(review): sqrtf would make the single-precision intent
        // explicit — confirm no double promotion is desired here.
        dist[xIndex] = sqrt(dist[xIndex]);
    }
}
// Finishes a bitonic merge once the stride fits within one block's
// 2*blockDim.x chunk: runs all remaining (halving) stride steps in a single
// kernel. Despite the name, it operates directly on global memory.
__global__ void cuBitonicMergeShared(float *dist, int *ind, int n, int size, int dir)
{
    int blockoffset = blockIdx.x * blockDim.x * 2;
    dist += blockoffset;
    ind += blockoffset;
    int comparatorI = (blockIdx.x * blockDim.x + threadIdx.x) & ((n / 2) - 1);
    int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
    for (int stride = blockDim.x; stride > 0; stride >>= 1)
    {
        __syncthreads();
        int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
        cuCompare(dist[pos], ind[pos], dist[pos + stride], ind[pos + stride], ddd);
    }
}
// Host-side driver of the bitonic key/value sort (dir = 1 -> ascending).
// n is expected to be a power of two (see factorRadix2 check in the caller).
// Single-block inputs are sorted in one kernel; larger inputs use a
// per-chunk sort followed by global merge steps, switching to the in-chunk
// merge kernel once the stride fits in a block's chunk.
void bitonic_sort(float *dist_dev, int *ind_dev, int n, int dir)
{
    int max_threads_per_block = min(SHARED_SIZE_LIMIT, n);
    int blockCount = n / max_threads_per_block;
    int threadCount = max_threads_per_block / 2;  // each thread owns 2 elements
    blockCount = max(1, blockCount);
    threadCount = min(max_threads_per_block, threadCount);
    if (blockCount == 1)
    {
        cuBitonicSortOneBlock <<< blockCount, threadCount>>>(dist_dev, ind_dev, n, dir);
    }
    else
    {
        cuBitonicSort <<< blockCount, threadCount>>>(dist_dev, ind_dev, n, dir);
        for (int size = 2 * max_threads_per_block; size <= n; size <<= 1)
        {
            for (int stride = size / 2; stride > 0; stride >>= 1)
            {
                if (stride >= max_threads_per_block)
                {
                    cuBitonicMergeGlobal <<< blockCount, threadCount>>>(dist_dev, ind_dev, n, size, stride, dir);
                }
                else
                {
                    // Remaining strides handled in one kernel; break out.
                    cuBitonicMergeShared <<< blockCount, threadCount>>>(dist_dev, ind_dev, n, size, dir);
                    break;
                }
            }
        }
    }
}
// Factors L as (odd part) * 2^(*log2L); returns the odd part and stores the
// exponent of two in *log2L. A return value of 1 means L is a power of two.
// L == 0 yields (0, 0).
// Fixed: the original loop increment was `*log2L++`, which — because postfix
// ++ binds tighter than unary * — advanced the POINTER instead of the value,
// leaving *log2L stuck at 0 and walking the pointer out of bounds.
int factorRadix2(int *log2L, int L)
{
    if (!L)
    {
        *log2L = 0;
        return 0;
    }
    for (*log2L = 0; (L & 1) == 0; L >>= 1)
    {
        ++*log2L;
    }
    return L;
}
// Brute-force k-nearest-neighbours: computes the squared distance from every
// reference point to the query, bitonic-sorts (ascending) distances together
// with their indices, square-roots the k best distances and copies the k
// results back to the host.
// Preconditions: ref_nb should be a power of two (bitonic sort requirement,
// see factorRadix2) and dim is expected to be 3 (see cuComputeDistanceGlobal
// and the 3-element query_dev constant).
void knn_brute_force_bitonic_sort(float *ref_host, int ref_nb, float *query_host, int dim, int k, float *dist_host, int *ind_host)
{
    unsigned int size_of_float = sizeof(float);
    unsigned int size_of_int = sizeof(int);
    float *ref_dev;
    float *dist_dev;
    int *ind_dev;
    int log2L;
    int factorizationRemainder = factorRadix2(&log2L, ref_nb);
    (void)factorizationRemainder;  // == 1 iff ref_nb is a power of two
    // assert(factorizationRemainder == 1);
    checkCudaErrors(cudaMalloc( (void **) &dist_dev, ref_nb * size_of_float));
    checkCudaErrors(cudaMalloc( (void **) &ind_dev, ref_nb * size_of_int));
    checkCudaErrors(cudaMalloc( (void **) &ref_dev, ref_nb * size_of_float * dim));
    checkCudaErrors(cudaMemcpy(ref_dev, ref_host, ref_nb * dim * size_of_float, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpyToSymbol(query_dev, query_host, dim * size_of_float));
    int threadCount = min(ref_nb, SHARED_SIZE_LIMIT);
    int blockCount = ref_nb / threadCount;
    blockCount = min(blockCount, 65000);
    cuComputeDistanceGlobal <<< blockCount, threadCount>>>(ref_dev, ref_nb, dim, dist_dev, ind_dev);
    bitonic_sort(dist_dev, ind_dev, ref_nb, 1);
    cuParallelSqrt <<< k, 1>>>(dist_dev, k);
    checkCudaErrors(cudaMemcpy(dist_host, dist_dev, k * size_of_float, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(ind_host, ind_dev, k * size_of_int, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(ref_dev));
    checkCudaErrors(cudaFree(dist_dev));  // was leaked in the original
    checkCudaErrors(cudaFree(ind_dev));
}
|
bdca38c429edf874ba6f1c436b1bfdd982b7aaeb.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation of the int8 NCHW32/CRSK32 implicit-GEMM convolution
// (tensor-op, SM75) with a 128x64x64 threadblock tile and 64x32x64 warp tile.
// NOTE: this file is auto-generated — edit the generator, not this file.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
        LayoutSrc, int32_t, LayoutSrc, int32_t,
        cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
        ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src,
        const int8_t* d_filter,
        const int32_t* d_bias,
        const int8_t* d_z,
        int8_t* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| bdca38c429edf874ba6f1c436b1bfdd982b7aaeb.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation of the int8 implicit-GEMM convolution wrapper for:
//   - NCxHWx<32> activations and CxRSKx<32> filters
//   - tensor-op math targeting Sm75
//   - 128x64x64 threadblock tile, 64x32x64 warp tile, 8x8x16 instruction shape
//   - bias + linear-combination + clamp epilogue
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
// Force emission of this specialization so other translation units can link
// against it without re-instantiating the (expensive) cutlass template.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
86f90e5658ea5b0eb3866cc331fa1655d61e2e94.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <functional>
#include <limits>
#include <memory>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_lists.hpp>
#include <type_traits>
using cudf::test::fixed_width_column_wrapper;
using TestTypes = cudf::test::Types<int32_t>;
// Create an *invalid* (null) scalar of T's cudf scalar wrapper type.
// Used by the shift tests below as a null fill value.
template <typename T, typename ScalarType = cudf::scalar_type_t<T>>
std::unique_ptr<cudf::scalar> make_scalar(
hipStream_t stream = 0, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
// second ctor argument `false` marks the scalar as invalid (null)
auto s = new ScalarType(0, false, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
// Create a *valid* scalar holding `value`.
template <typename T, typename ScalarType = cudf::scalar_type_t<T>>
std::unique_ptr<cudf::scalar> make_scalar(
T value,
hipStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
// second ctor argument `true` marks the scalar as valid
auto s = new ScalarType(value, true, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
// Numeric extremes of T, used to build boundary-value test columns.
template <typename T>
auto lowest = std::numeric_limits<T>::lowest();
template <typename T>
auto highest = std::numeric_limits<T>::max();
// Typed gtest fixture for cudf::shift tests.
template <typename T>
struct ShiftTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(ShiftTest, cudf::test::FixedWidthTypes);
TYPED_TEST(ShiftTest, OneColumnEmpty)
{
using T = TypeParam;
std::vector<T> vals{};
std::vector<bool> mask{};
auto input = fixed_width_column_wrapper<T>{};
auto expected = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 5, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsEmpty)
{
using T = TypeParam;
std::vector<T> vals{};
std::vector<bool> mask{};
auto input = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto expected = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 5, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumn)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(1), T(2), T(3), T(4), T(5), highest<T>};
auto expected = fixed_width_column_wrapper<T>{T(7), T(7), lowest<T>, T(1), T(2), T(3), T(4)};
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNegativeShift)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(1), T(2), T(3), T(4), T(5), highest<T>};
auto expected = fixed_width_column_wrapper<T>{T(4), T(5), highest<T>, T(7), T(7), T(7), T(7)};
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, -4, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNullFill)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(5), T(0), T(3), T(0), T(1), highest<T>};
auto expected = fixed_width_column_wrapper<T>({T(0), T(0), lowest<T>, T(5), T(0), T(3), T(0)},
{0, 0, 1, 1, 1, 1, 1});
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsNullableInput)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>({1, 2, 3, 4, 5}, {0, 1, 1, 1, 0});
auto expected = fixed_width_column_wrapper<T>({7, 7, 1, 2, 3}, {1, 1, 0, 1, 1});
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, MismatchFillValueDtypes)
{
using T = TypeParam;
if (std::is_same<T, int>::value) { return; }
auto input = fixed_width_column_wrapper<T>{};
auto fill = make_scalar<int>();
std::unique_ptr<cudf::column> output;
EXPECT_THROW(output = cudf::shift(input, 5, *fill), cudf::logic_error);
}
| 86f90e5658ea5b0eb3866cc331fa1655d61e2e94.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <functional>
#include <limits>
#include <memory>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_lists.hpp>
#include <type_traits>
using cudf::test::fixed_width_column_wrapper;
using TestTypes = cudf::test::Types<int32_t>;
template <typename T, typename ScalarType = cudf::scalar_type_t<T>>
std::unique_ptr<cudf::scalar> make_scalar(
cudaStream_t stream = 0, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
auto s = new ScalarType(0, false, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template <typename T, typename ScalarType = cudf::scalar_type_t<T>>
std::unique_ptr<cudf::scalar> make_scalar(
T value,
cudaStream_t stream = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
auto s = new ScalarType(value, true, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template <typename T>
auto lowest = std::numeric_limits<T>::lowest();
template <typename T>
auto highest = std::numeric_limits<T>::max();
template <typename T>
struct ShiftTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(ShiftTest, cudf::test::FixedWidthTypes);
TYPED_TEST(ShiftTest, OneColumnEmpty)
{
using T = TypeParam;
std::vector<T> vals{};
std::vector<bool> mask{};
auto input = fixed_width_column_wrapper<T>{};
auto expected = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 5, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsEmpty)
{
using T = TypeParam;
std::vector<T> vals{};
std::vector<bool> mask{};
auto input = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto expected = fixed_width_column_wrapper<T>(vals.begin(), vals.end(), mask.begin());
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 5, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumn)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(1), T(2), T(3), T(4), T(5), highest<T>};
auto expected = fixed_width_column_wrapper<T>{T(7), T(7), lowest<T>, T(1), T(2), T(3), T(4)};
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNegativeShift)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(1), T(2), T(3), T(4), T(5), highest<T>};
auto expected = fixed_width_column_wrapper<T>{T(4), T(5), highest<T>, T(7), T(7), T(7), T(7)};
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, -4, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, OneColumnNullFill)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>{lowest<T>, T(5), T(0), T(3), T(0), T(1), highest<T>};
auto expected = fixed_width_column_wrapper<T>({T(0), T(0), lowest<T>, T(5), T(0), T(3), T(0)},
{0, 0, 1, 1, 1, 1, 1});
auto fill = make_scalar<T>();
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, TwoColumnsNullableInput)
{
using T = TypeParam;
auto input = fixed_width_column_wrapper<T>({1, 2, 3, 4, 5}, {0, 1, 1, 1, 0});
auto expected = fixed_width_column_wrapper<T>({7, 7, 1, 2, 3}, {1, 1, 0, 1, 1});
auto fill = make_scalar<T>(7);
auto actual = cudf::shift(input, 2, *fill);
cudf::test::expect_columns_equal(expected, *actual);
}
TYPED_TEST(ShiftTest, MismatchFillValueDtypes)
{
using T = TypeParam;
if (std::is_same<T, int>::value) { return; }
auto input = fixed_width_column_wrapper<T>{};
auto fill = make_scalar<int>();
std::unique_ptr<cudf::column> output;
EXPECT_THROW(output = cudf::shift(input, 5, *fill), cudf::logic_error);
}
|
15cdc0cd74788e0c3b68e2aebd50efa65f3da59f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
// One cost-aggregation term of a semi-global-matching style recurrence for
// disparity `d` at flat index `ind` into the aggregated-cost volume `dp`.
// depth_dim_size is the pitch between consecutive disparity planes.
// d_zero/d_one/d_two are caller-owned scratch outputs receiving the same-,
// previous- and next-disparity candidates (the neighbours penalized by
// P_one); *d_three must be set by the caller before the call (in this file
// the caller passes prev_min + P_two).  Returns
// min(d0, d1, d2, d3) - *d_three + P_two.  D is a compile-time disparity
// count macro defined elsewhere.
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
// no previous disparity plane at d == 0: use a large sentinel cost
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
// no next disparity plane at d == D-1
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
// Bottom-up vertical aggregation pass: for every image column, walk rows
// from m-2 down to 0 and accumulate the dp_criteria recurrence term
// (computed from the row below) plus the raw matching cost into dp.
// dp / cost_image are D x m x n volumes (disparity-major); m rows, n cols.
// D, D_STEP, P1 and P2 are compile-time macros defined elsewhere.
// NOTE(review): the column stride below is blockDim.x only, not
// gridDim.x * blockDim.x — with more than one block in x, blocks would
// revisit (and race on) the same columns; confirm this kernel is launched
// with a single block in x.
__global__ void __vertical_aggregate_up(float *dp, float *cost_image, int m, int n)
{
// which column of array to work on
int col = blockDim.x * blockIdx.x + threadIdx.x;
int depth_dim_size = m*n;
// todo: maybe it will work better to take running average of every d
// slices
while(col < n)
{
for (int row = m-2; row >= 0; row--)
{
//int arr_ind = 0;
float prev_min = 100000000.0;
int ind = (row + 1) * n + col;
// calculate min cost disparity for this column from row-1
//#pragma unroll
// scan all disparity planes of the row below for the minimum cost
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
//arr[arr_ind] = cost_image[depth * m * n + (row - 1) * n + col];
//arr_ind++;
}
// float prev_min = arr_min(arr, D_SIZE);
// d0..d2 are scratch slots filled by dp_criteria; d3 carries the
// P2-penalized minimum over all disparities of the previous row
float d0 = 0;
float d1 = 0;
float d2 = 0;
float d3 = prev_min + (float) P2;
ind = (row + 1) * n + col;
int current_ind = row * n + col;
// todo: try having this loop go from 1 to d-1 and removing the if else
for (int d = 0; d < D; d+=D_STEP){
// for each d I need dp[{d-1, d, d+1}, row-1, col],
dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
ind += (depth_dim_size * D_STEP);
current_ind += (depth_dim_size * D_STEP);
}
}
col += blockDim.x;
}
} | 15cdc0cd74788e0c3b68e2aebd50efa65f3da59f.cu | #include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
__global__ void __vertical_aggregate_up(float *dp, float *cost_image, int m, int n)
{
// which column of array to work on
int col = blockDim.x * blockIdx.x + threadIdx.x;
int depth_dim_size = m*n;
// todo: maybe it will work better to take running average of every d
// slices
while(col < n)
{
for (int row = m-2; row >= 0; row--)
{
//int arr_ind = 0;
float prev_min = 100000000.0;
int ind = (row + 1) * n + col;
// calculate min cost disparity for this column from row-1
//#pragma unroll
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
//arr[arr_ind] = cost_image[depth * m * n + (row - 1) * n + col];
//arr_ind++;
}
// float prev_min = arr_min(arr, D_SIZE);
float d0 = 0;
float d1 = 0;
float d2 = 0;
float d3 = prev_min + (float) P2;
ind = (row + 1) * n + col;
int current_ind = row * n + col;
// todo: try having this loop go from 1 to d-1 and removing the if else
for (int d = 0; d < D; d+=D_STEP){
// for each d I need dp[{d-1, d, d+1}, row-1, col],
dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
ind += (depth_dim_size * D_STEP);
current_ind += (depth_dim_size * D_STEP);
}
}
col += blockDim.x;
}
} |
0a100ad318a3d5339f542fabda2036c6356af320.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/AbsCriterion.cu"
#else
#include "THHApply.cuh"
// Forward pass of the L1 (absolute error) criterion.
// reduce == true : writes a single scalar into `output` — the sum over all
//   elements of the per-element absolute differences (computed by
//   abs_functor via thrust::inner_product), divided by the element count
//   when sizeAverage is set.
// reduce == false: resizes `output` to `input` and fills it with the
//   per-element losses via a pointwise kernel.
void THNN_(AbsCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (!reduce) {
// unreduced path: one loss value per input element
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3(state, input, target, output,
abs_updateOutput_no_reduce_functor<real>());
return;
}
// reduced path: scalar result stored in a 1-element tensor
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
// contiguous copies so thrust can treat the data as flat device arrays
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
// accumulate in accreal (wider type) for precision
accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal)0, thrust::plus<accreal>(), abs_functor<real, accreal>());
if (sizeAverage)
sum /= size;
// release the contiguous copies created above
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
}
// Backward pass of the L1 criterion.
// reduce == true : fills gradInput element-wise via
//   abs_updateGradInput_functor(norm, gradOutput[0]), where norm is 1/size
//   when sizeAverage is set and 1 otherwise (functor defined elsewhere —
//   presumably sign(input - target) scaled by norm * gradOutput[0]).
// reduce == false: per-element gradients via a pointwise kernel, then
//   multiplied element-wise by gradOutput.
void THNN_(AbsCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradOutput, gradInput);
THCTensor_(resizeAs)(state, gradInput, input);
if (!reduce) {
THCUNN_check_shape(state, gradOutput, input);
THC_pointwiseApply3(state, input, target, gradInput,
abs_updateGradInput_no_reduce_functor<real>());
// chain rule: scale per-element gradients by the incoming gradient
THCTensor_(cmul)(state, gradInput, gradInput, gradOutput);
return;
}
// reduced path expects a 1-element gradOutput
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
real norm = ScalarConvert<double, real>::to(sizeAverage ? 1./size : 1.);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data,
abs_updateGradInput_functor<real>(norm, THCTensor_(get1d)(state, gradOutput, 0)));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
| 0a100ad318a3d5339f542fabda2036c6356af320.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/AbsCriterion.cu"
#else
#include "THCApply.cuh"
void THNN_(AbsCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (!reduce) {
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3(state, input, target, output,
abs_updateOutput_no_reduce_functor<real>());
return;
}
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal)0, thrust::plus<accreal>(), abs_functor<real, accreal>());
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
}
void THNN_(AbsCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradOutput, gradInput);
THCTensor_(resizeAs)(state, gradInput, input);
if (!reduce) {
THCUNN_check_shape(state, gradOutput, input);
THC_pointwiseApply3(state, input, target, gradInput,
abs_updateGradInput_no_reduce_functor<real>());
THCTensor_(cmul)(state, gradInput, gradInput, gradOutput);
return;
}
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
real norm = ScalarConvert<double, real>::to(sizeAverage ? 1./size : 1.);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data,
abs_updateGradInput_functor<real>(norm, THCTensor_(get1d)(state, gradOutput, 0)));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
|
a9495ca69352a4487584da7cdfae0594f7a57b18.hip | // !!! This is a file automatically generated by hipify!!!
// Exemplo para o curso de Super Computacao
// Criado por: Luciano P. Soares (10 de Abril de 2018)
#include <stdio.h>
#include <stdlib.h>
//#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>
/* Informacoes da GPU */
/* Enumerate every device visible to the HIP runtime and print its launch
 * limits (grid/block dimensions, SM count, clock, warp size). */
int main() {
   int dev_count = 0;
   /* fix: the return status was ignored; on failure dev_count would be
    * indeterminate and the loop below would read garbage */
   if (hipGetDeviceCount(&dev_count) != hipSuccess) {
      printf("Falha ao consultar o numero de devices\n");
      return 1;
   }
   printf("Numero de devices (GPU) = %d\n\n", dev_count );
   hipDeviceProp_t dev_prop;
   for (int i = 0; i < dev_count; i++) {
      printf("\tDevice (%d)\n", i);
      /* fix: skip devices whose properties cannot be queried instead of
       * printing stale/uninitialized fields */
      if (hipGetDeviceProperties(&dev_prop, i) != hipSuccess) {
         continue;
      }
      printf("\t\tNumero maximo de Bloco\n");
      printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxGridSize[0],dev_prop.maxGridSize[1],dev_prop.maxGridSize[2] );
      printf("\t\tNumero maximo de Threads por Bloco = %d\n", dev_prop.maxThreadsPerBlock );
      printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxThreadsDim[0],dev_prop.maxThreadsDim[1],dev_prop.maxThreadsDim[2] );
      printf("\t\tNumero maximo de Streaming Multiprocessors (SMs) = %d\n", dev_prop.multiProcessorCount );
      /* clockRate is reported by the runtime in kHz */
      printf("\t\tFrequencia de Clock = %d\n", dev_prop.clockRate );
      printf("\t\tTamanho do Warp = %d\n", dev_prop.warpSize );
   }
   return 0;
}
| a9495ca69352a4487584da7cdfae0594f7a57b18.cu | // Exemplo para o curso de Super Computacao
// Criado por: Luciano P. Soares (10 de Abril de 2018)
#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
//#include <cuda_runtime.h>
/* Informacoes da GPU */
/* Enumerate every CUDA device and print its launch limits
 * (grid/block dimensions, SM count, clock, warp size). */
int main() {
   int dev_count = 0;
   /* fix: the return status was ignored; on failure dev_count would be
    * indeterminate and the loop below would read garbage */
   if (cudaGetDeviceCount(&dev_count) != cudaSuccess) {
      printf("Falha ao consultar o numero de devices\n");
      return 1;
   }
   printf("Numero de devices (GPU) = %d\n\n", dev_count );
   cudaDeviceProp dev_prop;
   for (int i = 0; i < dev_count; i++) {
      printf("\tDevice (%d)\n", i);
      /* fix: skip devices whose properties cannot be queried instead of
       * printing stale/uninitialized fields */
      if (cudaGetDeviceProperties(&dev_prop, i) != cudaSuccess) {
         continue;
      }
      printf("\t\tNumero maximo de Bloco\n");
      printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxGridSize[0],dev_prop.maxGridSize[1],dev_prop.maxGridSize[2] );
      printf("\t\tNumero maximo de Threads por Bloco = %d\n", dev_prop.maxThreadsPerBlock );
      printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxThreadsDim[0],dev_prop.maxThreadsDim[1],dev_prop.maxThreadsDim[2] );
      printf("\t\tNumero maximo de Streaming Multiprocessors (SMs) = %d\n", dev_prop.multiProcessorCount );
      /* clockRate is reported by the runtime in kHz */
      printf("\t\tFrequencia de Clock = %d\n", dev_prop.clockRate );
      printf("\t\tTamanho do Warp = %d\n", dev_prop.warpSize );
   }
   return 0;
}
|
fbc6e2fdd586f408915288f08fb1cbe295172ead.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define BLOCK_SIZE 128
#define N 4194304
// #define N 2048
// Reduction variant 1 (naive interleaved addressing): each block sums
// BLOCK_SIZE ints of g_idata into g_odata[blockIdx.x].
// size: number of valid elements in g_idata.
// The tid % (2*s) test causes heavy warp divergence and uses the expensive
// modulo operator; kept as the baseline variant.
__global__ void reduce0(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: `size` was accepted but never used, so the load was unguarded
    // when the grid overshoots the data.  0 is the additive identity, so
    // padded lanes cannot change the block sum.
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        if (tid % (2 * s) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // lane 0 publishes this block's partial sum
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 2 (interleaved addressing, computed index): replaces
// the tid % (2*s) test of reduce0 with a strided index so active threads
// stay packed in the low warps and the modulo disappears.  Shared-memory
// bank conflicts remain.
__global__ void reduce1(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard the global load against the grid overshooting `size`
    // (0 is the additive identity, so padded lanes contribute nothing)
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 3 (sequential addressing): the stride halves every
// step, so each thread reads contiguous shared-memory locations —
// divergence-free within warps and free of bank conflicts.
__global__ void reduce2(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard the global load against the grid overshooting `size`
    // (0 is the additive identity, so padded lanes contribute nothing)
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 4 ("first add during load"): each thread folds two
// input elements while loading, halving the number of blocks needed.
// fix: the original wrapped the entire body — including __syncthreads() —
// in `if (i < size)`.  When a block straddles the end of the data, the
// out-of-range lanes skip the barrier, which is undefined behavior.  It
// also read g_idata[i + blockDim.x] without checking that index.  Guard
// each load individually and keep the barriers uniform across the block.
__global__ void reduce3(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int v = 0;
    if (i < (unsigned int)size)
        v += g_idata[i];
    if (i + blockDim.x < (unsigned int)size)
        v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 5: first add during load + unrolled last warp.
// fixes relative to the original:
//  - the whole body (incl. __syncthreads) was guarded by `if (i < size)`,
//    a divergent barrier when a block straddles the end of the data (UB);
//  - __syncthreads() was called inside `if (tid < 32)` — a barrier in
//    divergent control flow is undefined behavior.  The warp tail now uses
//    volatile shared-memory accesses (classic Harris reduction #6) so each
//    partial sum is re-read from shared memory instead.
// NOTE(review): on Volta+ independent thread scheduling the volatile tail
// additionally needs __syncwarp() between steps — confirm target arch.
__global__ void reduce4(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int v = 0;
    if (i < (unsigned int)size)
        v += g_idata[i];
    if (i + blockDim.x < (unsigned int)size)
        v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid < 32)
    {
        // volatile forces each read to go back to shared memory, keeping
        // the lockstep warp lanes consistent without block-wide barriers
        volatile int *vs = sdata;
        vs[tid] += vs[tid + 32];
        vs[tid] += vs[tid + 16];
        vs[tid] += vs[tid + 8];
        vs[tid] += vs[tid + 4];
        vs[tid] += vs[tid + 2];
        vs[tid] += vs[tid + 1];
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
#define KERNEL_NUM 5
// Dispatch table: maps variant index 0..4 to the matching reduction kernel.
void (*KERNELS[KERNEL_NUM])(int *, int * ,int) = {
reduce0, reduce1, reduce2, reduce3, reduce4};
/* Fill the first `size` entries of `data` with `val` (no-op for size <= 0). */
void constantInit(int *data, int size, int val)
{
    int idx = 0;
    while (idx < size)
    {
        data[idx] = val;
        idx++;
    }
}
/* Abort the process with a file-line diagnostic when a HIP runtime call
 * did not succeed; no-op on success. */
void checkError(hipError_t error, int line)
{
    if (error == hipSuccess)
        return;
    printf("### error occurred in line %d \n error : %s", line, hipGetErrorString(error));
    exit(EXIT_FAILURE);
}
/* CPU baseline: sum an array of N ones, timing both the summation loop
 * alone and loop + setup.  Returns the loop time in milliseconds.
 * fix: the N-int buffer was leaked on every call. */
float serial_reduction()
{
    clock_t beginn = clock();
    int *A = (int *)malloc(sizeof(int) * N);
    constantInit(A, N, 1);
    long int sum = 0;
    clock_t begin = clock();
    for (int i = 0; i < N; i++)
        sum += A[i];
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("serial execution : %f ms\n", time_spent);
    clock_t endd = clock();
    float time_spentt = ((float)(endd - beginn) / CLOCKS_PER_SEC) * 1000;
    printf("total serial execution : %f ms\n", time_spentt);
    /* NOTE(review): `sum` is never consumed, so an optimizer may elide the
     * loop and report ~0 ms — consider printing it as well. */
    free(A); /* fix: buffer was previously leaked */
    return time_spent;
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run GPU reduction variant `func_index` (see KERNELS) over an array of n
 * ones, timing each pass with events.  Partial block sums are reduced in
 * successive passes until few enough remain to finish on the host.
 * Prints timing / bandwidth / result and returns EXIT_SUCCESS.
 */
int reduction(int argc, char **argv, int n, int func_index)
{
    // host input: n ones, so the expected reduction result is n
    unsigned int msize = n;
    unsigned int mem_size = sizeof(int) * msize;
    int *h_in = (int *)malloc(mem_size);
    constantInit(h_in, msize, 1);
    // d_in holds the current pass input, d_out receives its partial sums
    int *d_in;
    int *d_out;
    int grid_size = (n - 1) / BLOCK_SIZE + 1;
    // variants 3 and 4 fold two elements per thread on load -> half the blocks
    if (func_index >= 3)
        grid_size /= 2;
    hipError_t error;
    clock_t begin = clock();
    error = hipMalloc((void **)&d_in, mem_size);
    checkError(error, __LINE__);
    int output_size = grid_size * sizeof(int);
    error = hipMalloc((void **)&d_out, output_size);
    checkError(error, __LINE__);
    error = hipMemcpy(d_in, h_in, mem_size, hipMemcpyHostToDevice);
    checkError(error, __LINE__);
    float total_time = 0.0f;
    printf("grid size : %d block size : %d number of threads : %d \n", grid_size, BLOCK_SIZE, grid_size * BLOCK_SIZE);
    int size = N;
    while (grid_size >= 1)
    {
        output_size = grid_size * sizeof(int);
        hipEvent_t start;
        error = hipEventCreate(&start);
        checkError(error, __LINE__);
        hipEvent_t stop;
        error = hipEventCreate(&stop);
        checkError(error, __LINE__);
        error = hipEventRecord(start, NULL);
        checkError(error, __LINE__);
        dim3 threads(BLOCK_SIZE, 1, 1);
        dim3 grid(grid_size, 1, 1);
        // fix: hipify mangled the indexed launch into invalid code
        // ("KERNELShipLaunchKernelGGL(([func_index)]...").  Launch the
        // selected kernel through the dispatch table instead.
        hipLaunchKernelGGL(KERNELS[func_index], grid, threads, 0, 0, d_in, d_out, size);
        error = hipGetLastError();
        checkError(error, __LINE__);
        error = hipEventRecord(stop, NULL);
        checkError(error, __LINE__);
        error = hipEventSynchronize(stop);
        checkError(error, __LINE__);
        float msecTotal = 0.0f;
        // fix: hipEventElapsedTime was previously called twice per pass
        error = hipEventElapsedTime(&msecTotal, start, stop);
        checkError(error, __LINE__);
        total_time += msecTotal;
        // fix: events were created every pass but never destroyed
        hipEventDestroy(start);
        hipEventDestroy(stop);
        // Each launched block emitted exactly one partial sum, so that is
        // the element count of the next pass.  (fix: the old
        // `size /= BLOCK_SIZE` over-counted for variants 3/4, which launch
        // half as many blocks, making later passes read past the partials.)
        size = grid_size;
        grid_size /= BLOCK_SIZE;
        hipFree(d_in);
        d_in = d_out;
        error = hipMalloc((void **)&d_out, output_size);
        checkError(error, __LINE__);
    }
    // finish the remaining handful of partial sums on the host
    int *h_out = (int *)malloc(output_size);
    error = hipMemcpy(h_out, d_in, output_size, hipMemcpyDeviceToHost);
    checkError(error, __LINE__);
    int total_sum = 0;
    for (int i = 0; i < (int)(output_size / sizeof(int)); i++)
        total_sum += h_out[i];
    printf("Elapsed time in msec = %f and bandwidth %f GB/s result = %d \n", total_time, mem_size / (total_time * 1e6), total_sum);
    // Clean up memory
    free(h_in);
    free(h_out);
    hipFree(d_in);
    hipFree(d_out);
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("execution + memory allocations : %f ms\n", time_spent);
    return EXIT_SUCCESS;
}
/**
* Program main
*/
/**
 * Entry point: select device 0, print its identity, then run every
 * reduction variant in KERNELS over an array of N ones.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Reduction Using CUDA] - Starting...\n");
    // By default, we use device 0
    int devID = 0;
    hipSetDevice(devID);
    hipError_t error;
    hipDeviceProp_t deviceProp;
    error = hipGetDevice(&devID);
    checkError(error, __LINE__);
    error = hipGetDeviceProperties(&deviceProp, devID);
    checkError(error, __LINE__);
    // fix: checkError() already aborts on failure, so the original
    // `if (error != hipSuccess)` branch here was unreachable dead code.
    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    int n = N;
    printf("Array with size (%d)\n", n);
    // serial_reduction();
    for (size_t i = 0; i < KERNEL_NUM; i++)
    {
        printf("\n num implementation : %d \n", (int)i + 1);
        reduction(argc, argv, n, i);
    }
    return 0;
}
| fbc6e2fdd586f408915288f08fb1cbe295172ead.cu | // System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define BLOCK_SIZE 128
#define N 4194304
// #define N 2048
// Reduction variant 1 (naive interleaved addressing): each block sums
// BLOCK_SIZE ints of g_idata into g_odata[blockIdx.x].
// size: number of valid elements in g_idata.
// The tid % (2*s) test causes heavy warp divergence and uses the expensive
// modulo operator; kept as the baseline variant.
__global__ void reduce0(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: `size` was accepted but never used, so the load was unguarded
    // when the grid overshoots the data.  0 is the additive identity, so
    // padded lanes cannot change the block sum.
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        if (tid % (2 * s) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // lane 0 publishes this block's partial sum
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 2 (interleaved addressing, computed index): replaces
// the tid % (2*s) test of reduce0 with a strided index so active threads
// stay packed in the low warps and the modulo disappears.  Shared-memory
// bank conflicts remain.
__global__ void reduce1(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard the global load against the grid overshooting `size`
    // (0 is the additive identity, so padded lanes contribute nothing)
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 3 (sequential addressing): the stride halves every
// step, so each thread reads contiguous shared-memory locations —
// divergence-free within warps and free of bank conflicts.
__global__ void reduce2(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard the global load against the grid overshooting `size`
    // (0 is the additive identity, so padded lanes contribute nothing)
    sdata[tid] = (i < (unsigned int)size) ? g_idata[i] : 0;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 4 ("first add during load"): each thread folds two
// input elements while loading, halving the number of blocks needed.
// fix: the original wrapped the entire body — including __syncthreads() —
// in `if (i < size)`.  When a block straddles the end of the data, the
// out-of-range lanes skip the barrier, which is undefined behavior.  It
// also read g_idata[i + blockDim.x] without checking that index.  Guard
// each load individually and keep the barriers uniform across the block.
__global__ void reduce3(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int v = 0;
    if (i < (unsigned int)size)
        v += g_idata[i];
    if (i + blockDim.x < (unsigned int)size)
        v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Reduction variant 5: first add during load + unrolled last warp.
// fixes relative to the original:
//  - the whole body (incl. __syncthreads) was guarded by `if (i < size)`,
//    a divergent barrier when a block straddles the end of the data (UB);
//  - __syncthreads() was called inside `if (tid < 32)` — a barrier in
//    divergent control flow is undefined behavior.  The warp tail now uses
//    volatile shared-memory accesses (classic Harris reduction #6) so each
//    partial sum is re-read from shared memory instead.
// NOTE(review): on Volta+ independent thread scheduling the volatile tail
// additionally needs __syncwarp() between steps — confirm target arch.
__global__ void reduce4(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int v = 0;
    if (i < (unsigned int)size)
        v += g_idata[i];
    if (i + blockDim.x < (unsigned int)size)
        v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid < 32)
    {
        // volatile forces each read to go back to shared memory, keeping
        // the lockstep warp lanes consistent without block-wide barriers
        volatile int *vs = sdata;
        vs[tid] += vs[tid + 32];
        vs[tid] += vs[tid + 16];
        vs[tid] += vs[tid + 8];
        vs[tid] += vs[tid + 4];
        vs[tid] += vs[tid + 2];
        vs[tid] += vs[tid + 1];
    }
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Dispatch table over the five reduction variants; indices map to the
// func_index argument of reduction(). reduce0 is defined earlier in this
// file, outside this excerpt.
#define KERNEL_NUM 5
void (*KERNELS[KERNEL_NUM])(int *, int * ,int) = {
reduce0, reduce1, reduce2, reduce3, reduce4};
// Fill the first `size` entries of `data` with `val`.
void constantInit(int *data, int size, int val)
{
    int idx = 0;
    while (idx < size)
    {
        data[idx] = val;
        ++idx;
    }
}
// Abort the process when a CUDA API call failed, reporting the call-site
// line number and the runtime's error string.
// Fix vs. original: diagnostics go to stderr (not stdout, where they were
// interleaved with buffered measurement output) and end with a newline.
void checkError(cudaError_t error, int line)
{
    if (error != cudaSuccess)
    {
        fprintf(stderr, "### error occurred in line %d \n error : %s\n", line, cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
}
/**
 * CPU baseline: sums an N-element array of ones and prints both the pure
 * summation time and the total time including allocation/initialisation.
 * Returns the summation-only time in milliseconds.
 * Fix vs. original: the malloc'd buffer was leaked.
 */
float serial_reduction()
{
    clock_t beginn = clock();
    int *A = (int *)malloc(sizeof(int) * N);
    constantInit(A, N, 1);
    long int sum = 0;
    clock_t begin = clock();
    for (int i = 0; i < N; i++)
        sum += A[i];
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("serial execution : %f ms\n", time_spent);
    free(A); // fix: was leaked
    clock_t endd = clock();
    float time_spentt = ((float)(endd - beginn) / CLOCKS_PER_SEC) * 1000;
    printf("total serial execution : %f ms\n", time_spentt);
    return time_spent;
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Runs one reduction variant (KERNELS[func_index]) over an n-element array
 * of ones: repeatedly launches the kernel, reducing the partial block sums
 * on the device until the grid collapses, then accumulates the remaining
 * partial sums on the host and reports timing/bandwidth.
 *
 * Fixes vs. original: CUDA events are destroyed each pass (they were
 * leaked), the duplicated cudaEventElapsedTime call was removed, and the
 * never-read `stride` local was dropped.
 */
int reduction(int argc, char **argv, int n, int func_index)
{
    // Allocate and initialise the host input (all ones, so the expected
    // reduction result equals n).
    unsigned int msize = n;
    unsigned int mem_size = sizeof(int) * msize;
    int *h_in = (int *)malloc(mem_size);
    constantInit(h_in, msize, 1);
    // Allocate device memory
    int *d_in;
    int *d_out;
    int grid_size = (n - 1) / BLOCK_SIZE + 1;
    // reduce3/reduce4 (func_index 3 and 4) do a first add during load, so
    // each block consumes 2 * BLOCK_SIZE input elements.
    if (func_index >= 3)
        grid_size /= 2;
    cudaError_t error;
    clock_t begin = clock();
    error = cudaMalloc((void **)&d_in, mem_size);
    checkError(error, __LINE__);
    int output_size = grid_size * sizeof(int);
    error = cudaMalloc((void **)&d_out, output_size);
    checkError(error, __LINE__);
    // copy host memory to device
    error = cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
    checkError(error, __LINE__);
    float total_time = 0.0f;
    printf("grid size : %d block size : %d number of threads : %d \n", grid_size, BLOCK_SIZE, grid_size * BLOCK_SIZE);
    int size = N;
    while (grid_size >= 1)
    {
        output_size = grid_size * sizeof(int);
        cudaEvent_t start;
        error = cudaEventCreate(&start);
        checkError(error, __LINE__);
        cudaEvent_t stop;
        error = cudaEventCreate(&stop);
        checkError(error, __LINE__);
        // Record the start event
        error = cudaEventRecord(start, NULL);
        checkError(error, __LINE__);
        dim3 threads(BLOCK_SIZE, 1, 1);
        dim3 grid(grid_size, 1, 1);
        KERNELS[func_index]<<<grid, threads>>>(d_in, d_out, size);
        error = cudaGetLastError();
        checkError(error, __LINE__);
        // Record the stop event and wait for it to complete
        error = cudaEventRecord(stop, NULL);
        checkError(error, __LINE__);
        error = cudaEventSynchronize(stop);
        checkError(error, __LINE__);
        float msecTotal = 0.0f;
        error = cudaEventElapsedTime(&msecTotal, start, stop);
        checkError(error, __LINE__);
        total_time += msecTotal;
        // fix: events were leaked once per pass
        checkError(cudaEventDestroy(start), __LINE__);
        checkError(cudaEventDestroy(stop), __LINE__);
        // Next pass reduces the partial sums just produced.
        grid_size /= BLOCK_SIZE;
        size /= BLOCK_SIZE;
        cudaFree(d_in);
        d_in = d_out;
        error = cudaMalloc((void **)&d_out, output_size);
        checkError(error, __LINE__);
    }
    // The loop exits once grid_size reaches 0; the remaining partial sums
    // from the final launch are accumulated on the host.
    int *h_out = (int *)malloc(output_size);
    error = cudaMemcpy(h_out, d_in, output_size, cudaMemcpyDeviceToHost);
    checkError(error, __LINE__);
    int total_sum = 0;
    for (int i = 0; i < output_size / sizeof(int); i++)
        total_sum += h_out[i];
    printf("Elapsed time in msec = %f and bandwidth %f GB/s result = %d \n", total_time, mem_size / (total_time * 1e6), total_sum);
    // Clean up memory
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("execution + memory allocations : %f ms\n", time_spent);
    return EXIT_SUCCESS;
}
/**
* Program main
*/
/**
 * Program entry point: selects device 0, prints its properties, then runs
 * every reduction variant in KERNELS over an N-element array of ones.
 */
int main(int argc, char **argv)
{
printf("[Matrix Reduction Using CUDA] - Starting...\n");
// By default, we use device 0
int devID = 0;
cudaSetDevice(devID);
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
checkError(error, __LINE__);
error = cudaGetDeviceProperties(&deviceProp, devID);
checkError(error, __LINE__);
// NOTE(review): checkError() above already exits on failure, so this
// error branch is effectively dead code.
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// N is a compile-time problem size defined earlier in the file.
int n = N;
printf("Array with size (%d)\n", n);
// serial_reduction();
for (size_t i = 0; i < KERNEL_NUM; i++)
{
printf("\n num implementation : %d \n", (int)i + 1);
reduction(argc, argv, n, i);
}
return 0;
}
|
1de34b7b1af051942ae40a4cc92d2980aac0c73d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
* @brief Simple test driver program for BC.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <fstream>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/bc_clustering.hpp>
#include <boost/graph/iteration_macros.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
// Prints the command-line help text for the BC test driver to stdout.
void Usage()
{
printf("\ntest_bc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--src=<source index>] [--quick] [--v]"
"[--queue-sizing=<scale factor>] [--ref-file=<reference filename>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
"--device=<device_index>: Set GPU device for running the graph primitive.\n"
"--undirected: If set then treat the graph as undirected graph.\n"
"--instrumented: If set then kernels keep track of queue-search_depth\n"
"and barrier duty (a relative indicator of load imbalance.)\n"
"--src=<source index>: When source index is -1, compute BC value for each\n"
"node. Otherwise, debug the delta value for one node\n"
"--quick: If set will skip the CPU validation code.\n"
"--queue-sizing Allocates a frontier queue sized at (graph-edges * <scale factor>).\n"
"Default is 1.0.\n"
"--v: If set, enable verbose output, keep track of the kernel running.\n"
"--ref-file: If set, use pre-computed result stored in ref-file to verify.\n"
);
}
/**
* @brief Displays the BC result (sigma value and BC value)
*
* @param[in] sigmas
* @param[in] bc_values
* @param[in] nodes
*/
template<typename Value, typename SizeT>
void DisplaySolution(Value *sigmas, Value *bc_values, SizeT nodes)
{
// Only print for small graphs to keep output readable; larger graphs
// produce no output at all.
if (nodes < 40) {
printf("[");
for (SizeT i = 0; i < nodes; ++i) {
// Format per node: "node:sigma,bc_value "
PrintValue(i);
printf(":");
PrintValue(sigmas[i]);
printf(",");
PrintValue(bc_values[i]);
printf(" ");
}
printf("]\n");
}
}
/******************************************************************************
* BC Testing Routines
*****************************************************************************/
/**
* @brief Graph edge properties (bundled properties)
*/
struct EdgeProperties
{
int weight; // bundled property for the Boost adjacency_list; declared but never read in this file
};
/**
* @brief A simple CPU-based reference BC ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to graph we process on
* @param[in] bc_values Pointer to node bc value
* @param[in] ebc_values Pointer to edge bc value
* @param[in] sigmas Pointer to node sigma value
* @param[in] src VertexId of source node if there is any
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void RefCPUBC(
const Csr<VertexId, Value, SizeT> &graph,
Value *bc_values,
Value *ebc_values,
Value *sigmas,
VertexId src)
{
// NOTE(review): the two coo[idx++] writes below assume graph.edges counts
// both directions of every undirected edge (i.e. graph.edges ==
// 2 * num_edges(G)) -- confirm against the CSR builder.
typedef Coo<VertexId, Value> EdgeTupleType;
EdgeTupleType *coo = (EdgeTupleType*) malloc(sizeof(EdgeTupleType) * graph.edges);
if (src == -1) {
// Perform full exact BC using BGL
using namespace boost;
typedef adjacency_list <setS, vecS, undirectedS, no_property,
EdgeProperties> Graph;
typedef Graph::vertex_descriptor Vertex;
typedef Graph::edge_descriptor Edge;
Graph G;
// Rebuild the CSR as a Boost adjacency list; setS suppresses parallel
// edges, so the reverse arcs the CSR stores explicitly collapse into
// one undirected edge each.
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
add_edge(vertex(i, G), vertex(graph.column_indices[j], G), G);
}
}
// Assign each Boost edge a dense index so external vectors can hold
// per-edge centrality scores.
typedef std::map<Edge, int> StdEdgeIndexMap;
StdEdgeIndexMap my_e_index;
typedef boost::associative_property_map< StdEdgeIndexMap > EdgeIndexMap;
EdgeIndexMap e_index(my_e_index);
int i = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
my_e_index.insert(std::pair<Edge, int>(edge, i));
++i;
}
// Define EdgeCentralityMap
std::vector< double > e_centrality_vec(boost::num_edges(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
EdgeIndexMap >
e_centrality_map(e_centrality_vec.begin(), e_index);
// Define VertexCentralityMap
typedef boost::property_map< Graph, boost::vertex_index_t>::type
VertexIndexMap;
VertexIndexMap v_index = get(boost::vertex_index, G);
std::vector< double > v_centrality_vec(boost::num_vertices(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
VertexIndexMap>
v_centrality_map(v_centrality_vec.begin(), v_index);
//
// Perform BC
//
CpuTimer cpu_timer;
cpu_timer.Start();
brandes_betweenness_centrality( G, v_centrality_map, e_centrality_map );
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
BGL_FORALL_VERTICES(vertex, G, Graph)
{
bc_values[vertex] = (Value)v_centrality_map[vertex];
}
// Emit each undirected edge in both directions so the result lines up
// with the CSR's directed arcs, then sort into row-major (CSR) order.
int idx = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
coo[idx].row = source(edge, G);
coo[idx].col = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
coo[idx].col = source(edge, G);
coo[idx].row = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
}
std::stable_sort(coo, coo+graph.edges, RowFirstTupleCompare<EdgeTupleType>);
for (idx = 0; idx < graph.edges; ++idx) {
//std::cout << coo[idx].row << "," << coo[idx].col << ":" << coo[idx].val << std::endl;
ebc_values[idx] = coo[idx].val;
}
printf("CPU BC finished in %lf msec.", elapsed);
}
else {
//Simple BFS pass to get single pass BC
VertexId *source_path = new VertexId[graph.nodes];
//initialize distances
for (VertexId i = 0; i < graph.nodes; ++i) {
source_path[i] = -1;
bc_values[i] = 0;
sigmas[i] = 0;
}
source_path[src] = 0;
VertexId search_depth = 0;
sigmas[src] = 1;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
//Perform one pass of BFS for one source
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty()) {
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge) {
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1) {
source_path[neighbor] = neighbor_dist;
sigmas[neighbor] += sigmas[dequeued_node];
if (search_depth < neighbor_dist) {
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
else {
// Already-visited neighbor on the next BFS level lies on further
// shortest paths through dequeued_node: accumulate its sigma.
if (source_path[neighbor] == source_path[dequeued_node]+1)
sigmas[neighbor] += sigmas[dequeued_node];
}
}
}
search_depth++;
// Reverse sweep: walk BFS levels from deepest to the source, adding each
// node's dependency contribution from successors one level deeper.
for (int iter = search_depth - 2; iter > 0; --iter)
{
for (int node = 0; node < graph.nodes; ++node)
{
if (source_path[node] == iter) {
int edges_begin = graph.row_offsets[node];
int edges_end = graph.row_offsets[node+1];
for (int edge = edges_begin; edge < edges_end; ++edge) {
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == iter + 1) {
bc_values[node] +=
1.0f * sigmas[node] / sigmas[neighbor] *
(1.0f + bc_values[neighbor]);
}
}
}
}
}
// Halve the scores; presumably to undo double counting on the
// undirected graph -- matches the 0.5f scaling applied on the GPU side.
for (int i = 0; i < graph.nodes; ++i)
{
bc_values[i] *= 0.5f;
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU BFS finished in %lf msec. Search depth is:%d\n",
elapsed, search_depth);
delete[] source_path;
}
free(coo);
}
/**
* @brief Run betweenness centrality tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph object defined in main driver
* @param[in] src
* @param[in] ref_filename
* @param[in] max_grid_size
* @param[in] num_gpus
* @param[in] max_queue_sizing
* @param[in] iterations Number of iterations for running the test
* @param[in] context CudaContext pointer for moderngpu APIs
*/
// Runs BC on the GPU for one source (or all sources when src == -1),
// validates against the CPU reference (or a reference file), prints
// timing/validity, and releases all host and device resources.
// Fix vs. original: h_ebc_values was leaked in the cleanup section.
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
std::string &ref_filename,
int max_grid_size,
int num_gpus,
double max_queue_sizing,
int iterations,
CudaContext& context)
{
typedef BCProblem<
VertexId,
SizeT,
Value,
true, // MARK_PREDECESSORS
false> Problem; //does not use double buffer
// Allocate host-side array (for both reference and gpu-computed results)
Value *reference_bc_values = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *reference_ebc_values = (Value*)malloc(sizeof(Value) * graph.edges);
Value *reference_sigmas = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_sigmas = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_bc_values = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_ebc_values = (Value*)malloc(sizeof(Value) * graph.edges);
// Which references are checked depends on --quick and on whether a
// single source or all sources are requested.
Value *reference_check_bc_values = (g_quick) ? NULL : reference_bc_values;
Value *reference_check_ebc_values = (g_quick || (src != -1)) ? NULL : reference_ebc_values;
Value *reference_check_sigmas = (g_quick || (src == -1)) ? NULL : reference_sigmas;
// Allocate BC enactor map
BCEnactor<INSTRUMENT> bc_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus), "BC Problem Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU BC solution for source-distance
//
if (reference_check_bc_values != NULL) {
if (ref_filename.empty()) {
printf("Computing reference value ...\n");
RefCPUBC(
graph,
reference_check_bc_values,
reference_check_ebc_values,
reference_check_sigmas,
src);
printf("\n");
} else {
// Load a pre-computed per-node reference from a binary file.
std::ifstream fin;
fin.open(ref_filename.c_str(), std::ios::binary);
for ( int i = 0; i < graph.nodes; ++i )
{
fin.read(reinterpret_cast<char*>(&reference_check_bc_values[i]), sizeof(Value));
}
fin.close();
}
}
double avg_duty = 0.0;
// Perform BC
GpuTimer gpu_timer;
VertexId start_src;
VertexId end_src;
if (src == -1)
{
start_src = 0;
end_src = graph.nodes;
}
else
{
start_src = src;
end_src = src+1;
}
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter)
{
// Zero the accumulated BC values before each full pass.
hipLaunchKernelGGL(( util::MemsetKernel), dim3(128), dim3(128), 0, 0,
csr_problem->data_slices[0]->d_bc_values, (Value)0.0f, (int)graph.nodes);
gpu_timer.Start();
for (VertexId i = start_src; i < end_src; ++i)
{
util::GRError(csr_problem->Reset(i, bc_enactor.GetFrontierType(), max_queue_sizing), "BC Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(bc_enactor.template Enact<Problem>(context, csr_problem, i, max_grid_size), "BC Problem Enact Failed", __FILE__, __LINE__);
}
// Scale by 0.5 to match the CPU reference's halving.
hipLaunchKernelGGL(( util::MemsetScaleKernel), dim3(128), dim3(128), 0, 0,
csr_problem->data_slices[0]->d_bc_values, (Value)0.5f, (int)graph.nodes);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
bc_enactor.GetStatistics(avg_duty);
// Copy out results
util::GRError(csr_problem->Extract(h_sigmas, h_bc_values, h_ebc_values), "BC Problem Data Extraction Failed", __FILE__, __LINE__);
/*printf("edge bc values: %d\n", graph.edges);
for (int i = 0; i < graph.edges; ++i) {
printf("%5f, %5f\n", h_ebc_values[i], reference_check_ebc_values[i]);
}
printf("edge bc values end\n");*/
// Verify the result
if (reference_check_bc_values != NULL) {
printf("Validity BC Value: ");
int num_error = CompareResults(h_bc_values, reference_check_bc_values, graph.nodes,
true);
if (num_error > 0)
printf("Number of errors occurred: %d\n", num_error);
printf("\n");
}
if (reference_check_ebc_values != NULL) {
printf("Validity Edge BC Value: ");
CompareResults(h_ebc_values, reference_check_ebc_values, graph.edges,
true);
printf("\n");
}
if (reference_check_sigmas != NULL) {
printf("Validity Sigma: ");
CompareResults(h_sigmas, reference_check_sigmas, graph.nodes, true);
printf("\n");
}
// Display Solution
DisplaySolution(h_sigmas, h_bc_values, graph.nodes);
printf("GPU BC finished in %lf msec.\n", elapsed);
if (avg_duty != 0)
printf("\n avg CTA duty: %.2f%% \n", avg_duty * 100);
// Cleanup
if (csr_problem) delete csr_problem;
if (reference_sigmas) free(reference_sigmas);
if (reference_bc_values) free(reference_bc_values);
if (reference_ebc_values) free(reference_ebc_values);
if (h_sigmas) free(h_sigmas);
if (h_bc_values) free(h_bc_values);
if (h_ebc_values) free(h_ebc_values); // fix: was leaked
hipDeviceSynchronize();
}
/**
 * Parses BC-related command-line arguments and dispatches to the
 * instrumented or non-instrumented RunTests instantiation (INSTRUMENT is
 * a compile-time template flag, so each setting needs its own
 * instantiation).
 */
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
VertexId src = -1; // Use whatever the specified graph-type's default is
std::string src_str;
std::string ref_filename;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
double max_queue_sizing = 1.0; // Maximum size scaling factor for work queues (e.g., 1.0 creates n and m-element vertex and edge frontiers).
int iterations = 1;
g_quick = false; // Whether or not to skip ref validation
args.GetCmdLineArgument("iteration-num", iterations);
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("src", src_str);
args.GetCmdLineArgument("ref-file", ref_filename);
// An absent --src means "all sources" (src == -1).
if (src_str.empty()) {
src = -1;
} else {
args.GetCmdLineArgument("src", src);
}
g_quick = args.CheckCmdLineFlag("quick");
args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented) {
RunTests<VertexId, Value, SizeT, true>(
graph,
src,
ref_filename,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
context);
} else {
RunTests<VertexId, Value, SizeT, false>(
graph,
src,
ref_filename,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
/**
 * Entry point: parses command-line arguments, creates the moderngpu
 * context on the selected device, builds a Matrix-Market graph, and runs
 * the BC tests.
 */
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-contruction params
g_undirected = true;
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1) {
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market") {
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier type
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default value for stream_from_host is false
if (graph_args < 1) { Usage(); return 1; }
// With only "market" given (graph_args == 1) the filename stays NULL
// and the graph is read from stdin.
char *market_filename = (graph_args == 2 || graph_args == 3) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) //no inverse graph
{
return 1;
}
csr.PrintHistogram();
//csr.DisplayGraph();
fflush(stdout);
// Run tests
RunTests(csr, args, *context);
} else {
// Unknown graph type
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 1de34b7b1af051942ae40a4cc92d2980aac0c73d.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
* @brief Simple test driver program for BC.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <fstream>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/bc_clustering.hpp>
#include <boost/graph/iteration_macros.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
// Prints the command-line help text for the BC test driver to stdout.
void Usage()
{
printf("\ntest_bc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--src=<source index>] [--quick] [--v]"
"[--queue-sizing=<scale factor>] [--ref-file=<reference filename>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
"--device=<device_index>: Set GPU device for running the graph primitive.\n"
"--undirected: If set then treat the graph as undirected graph.\n"
"--instrumented: If set then kernels keep track of queue-search_depth\n"
"and barrier duty (a relative indicator of load imbalance.)\n"
"--src=<source index>: When source index is -1, compute BC value for each\n"
"node. Otherwise, debug the delta value for one node\n"
"--quick: If set will skip the CPU validation code.\n"
"--queue-sizing Allocates a frontier queue sized at (graph-edges * <scale factor>).\n"
"Default is 1.0.\n"
"--v: If set, enable verbose output, keep track of the kernel running.\n"
"--ref-file: If set, use pre-computed result stored in ref-file to verify.\n"
);
}
/**
* @brief Displays the BC result (sigma value and BC value)
*
* @param[in] sigmas
* @param[in] bc_values
* @param[in] nodes
*/
template<typename Value, typename SizeT>
void DisplaySolution(Value *sigmas, Value *bc_values, SizeT nodes)
{
// Only print for small graphs to keep output readable; larger graphs
// produce no output at all.
if (nodes < 40) {
printf("[");
for (SizeT i = 0; i < nodes; ++i) {
// Format per node: "node:sigma,bc_value "
PrintValue(i);
printf(":");
PrintValue(sigmas[i]);
printf(",");
PrintValue(bc_values[i]);
printf(" ");
}
printf("]\n");
}
}
/******************************************************************************
* BC Testing Routines
*****************************************************************************/
/**
* @brief Graph edge properties (bundled properties)
*/
struct EdgeProperties
{
int weight; // bundled property for the Boost adjacency_list; declared but never read in this file
};
/**
* @brief A simple CPU-based reference BC ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to graph we process on
* @param[in] bc_values Pointer to node bc value
* @param[in] ebc_values Pointer to edge bc value
* @param[in] sigmas Pointer to node sigma value
* @param[in] src VertexId of source node if there is any
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void RefCPUBC(
const Csr<VertexId, Value, SizeT> &graph,
Value *bc_values,
Value *ebc_values,
Value *sigmas,
VertexId src)
{
// NOTE(review): the two coo[idx++] writes below assume graph.edges counts
// both directions of every undirected edge (i.e. graph.edges ==
// 2 * num_edges(G)) -- confirm against the CSR builder.
typedef Coo<VertexId, Value> EdgeTupleType;
EdgeTupleType *coo = (EdgeTupleType*) malloc(sizeof(EdgeTupleType) * graph.edges);
if (src == -1) {
// Perform full exact BC using BGL
using namespace boost;
typedef adjacency_list <setS, vecS, undirectedS, no_property,
EdgeProperties> Graph;
typedef Graph::vertex_descriptor Vertex;
typedef Graph::edge_descriptor Edge;
Graph G;
// Rebuild the CSR as a Boost adjacency list; setS suppresses parallel
// edges, so the reverse arcs the CSR stores explicitly collapse into
// one undirected edge each.
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
add_edge(vertex(i, G), vertex(graph.column_indices[j], G), G);
}
}
// Assign each Boost edge a dense index so external vectors can hold
// per-edge centrality scores.
typedef std::map<Edge, int> StdEdgeIndexMap;
StdEdgeIndexMap my_e_index;
typedef boost::associative_property_map< StdEdgeIndexMap > EdgeIndexMap;
EdgeIndexMap e_index(my_e_index);
int i = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
my_e_index.insert(std::pair<Edge, int>(edge, i));
++i;
}
// Define EdgeCentralityMap
std::vector< double > e_centrality_vec(boost::num_edges(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
EdgeIndexMap >
e_centrality_map(e_centrality_vec.begin(), e_index);
// Define VertexCentralityMap
typedef boost::property_map< Graph, boost::vertex_index_t>::type
VertexIndexMap;
VertexIndexMap v_index = get(boost::vertex_index, G);
std::vector< double > v_centrality_vec(boost::num_vertices(G), 0.0);
// Create the external property map
boost::iterator_property_map< std::vector< double >::iterator,
VertexIndexMap>
v_centrality_map(v_centrality_vec.begin(), v_index);
//
// Perform BC
//
CpuTimer cpu_timer;
cpu_timer.Start();
brandes_betweenness_centrality( G, v_centrality_map, e_centrality_map );
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
BGL_FORALL_VERTICES(vertex, G, Graph)
{
bc_values[vertex] = (Value)v_centrality_map[vertex];
}
// Emit each undirected edge in both directions so the result lines up
// with the CSR's directed arcs, then sort into row-major (CSR) order.
int idx = 0;
BGL_FORALL_EDGES(edge, G, Graph)
{
coo[idx].row = source(edge, G);
coo[idx].col = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
coo[idx].col = source(edge, G);
coo[idx].row = target(edge, G);
coo[idx++].val = (Value)e_centrality_map[edge];
}
std::stable_sort(coo, coo+graph.edges, RowFirstTupleCompare<EdgeTupleType>);
for (idx = 0; idx < graph.edges; ++idx) {
//std::cout << coo[idx].row << "," << coo[idx].col << ":" << coo[idx].val << std::endl;
ebc_values[idx] = coo[idx].val;
}
printf("CPU BC finished in %lf msec.", elapsed);
}
else {
//Simple BFS pass to get single pass BC
VertexId *source_path = new VertexId[graph.nodes];
//initialize distances
for (VertexId i = 0; i < graph.nodes; ++i) {
source_path[i] = -1;
bc_values[i] = 0;
sigmas[i] = 0;
}
source_path[src] = 0;
VertexId search_depth = 0;
sigmas[src] = 1;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
//Perform one pass of BFS for one source
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty()) {
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge) {
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1) {
source_path[neighbor] = neighbor_dist;
sigmas[neighbor] += sigmas[dequeued_node];
if (search_depth < neighbor_dist) {
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
else {
// Already-visited neighbor on the next BFS level lies on further
// shortest paths through dequeued_node: accumulate its sigma.
if (source_path[neighbor] == source_path[dequeued_node]+1)
sigmas[neighbor] += sigmas[dequeued_node];
}
}
}
search_depth++;
// Reverse sweep: walk BFS levels from deepest to the source, adding each
// node's dependency contribution from successors one level deeper.
for (int iter = search_depth - 2; iter > 0; --iter)
{
for (int node = 0; node < graph.nodes; ++node)
{
if (source_path[node] == iter) {
int edges_begin = graph.row_offsets[node];
int edges_end = graph.row_offsets[node+1];
for (int edge = edges_begin; edge < edges_end; ++edge) {
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == iter + 1) {
bc_values[node] +=
1.0f * sigmas[node] / sigmas[neighbor] *
(1.0f + bc_values[neighbor]);
}
}
}
}
}
// Halve the scores; presumably to undo double counting on the
// undirected graph -- matches the 0.5f scaling applied on the GPU side.
for (int i = 0; i < graph.nodes; ++i)
{
bc_values[i] *= 0.5f;
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU BFS finished in %lf msec. Search depth is:%d\n",
elapsed, search_depth);
delete[] source_path;
}
free(coo);
}
/**
 * @brief Run betweenness centrality tests
 *
 * @tparam VertexId   Vertex identifier type
 * @tparam Value      Score type (BC values, sigmas)
 * @tparam SizeT      Graph size type
 * @tparam INSTRUMENT Whether to collect per-kernel instrumentation
 *
 * @param[in] graph            Reference to the CSR graph object defined in main driver
 * @param[in] src              Source vertex (-1 = run BC from every vertex)
 * @param[in] ref_filename     Optional file holding precomputed reference BC values
 * @param[in] max_grid_size    Maximum grid size (0: leave it up to the enactor)
 * @param[in] num_gpus         Number of GPUs
 * @param[in] max_queue_sizing Scaling factor for frontier-queue allocation
 * @param[in] iterations       Number of timed iterations for running the test
 * @param[in] context          CudaContext pointer for moderngpu APIs
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT>
void RunTests(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId src,
    std::string &ref_filename,
    int max_grid_size,
    int num_gpus,
    double max_queue_sizing,
    int iterations,
    CudaContext& context)
{
    typedef BCProblem<
        VertexId,
        SizeT,
        Value,
        true,    // MARK_PREDECESSORS
        false>   // does not use double buffer
        Problem;

    // Host-side arrays (for both reference and GPU-computed results).
    Value *reference_bc_values  = (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *reference_ebc_values = (Value*)malloc(sizeof(Value) * graph.edges);
    Value *reference_sigmas     = (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *h_sigmas             = (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *h_bc_values          = (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *h_ebc_values         = (Value*)malloc(sizeof(Value) * graph.edges);

    // NULL disables the corresponding validation. Edge-BC is only checked
    // for full runs (src == -1); sigmas only for single-source runs.
    Value *reference_check_bc_values  = (g_quick) ? NULL : reference_bc_values;
    Value *reference_check_ebc_values = (g_quick || (src != -1)) ? NULL : reference_ebc_values;
    Value *reference_check_sigmas     = (g_quick || (src == -1)) ? NULL : reference_sigmas;

    // Allocate BC enactor map
    BCEnactor<INSTRUMENT> bc_enactor(g_verbose);

    // Allocate problem on GPU
    Problem *csr_problem = new Problem;
    util::GRError(csr_problem->Init(
        g_stream_from_host,
        graph,
        num_gpus), "BC Problem Initialization Failed", __FILE__, __LINE__);

    //
    // Compute (or load from file) the reference CPU BC solution
    //
    if (reference_check_bc_values != NULL) {
        if (ref_filename.empty()) {
            printf("Computing reference value ...\n");
            RefCPUBC(
                graph,
                reference_check_bc_values,
                reference_check_ebc_values,
                reference_check_sigmas,
                src);
            printf("\n");
        } else {
            // Reference file is a raw binary dump of graph.nodes Values.
            std::ifstream fin;
            fin.open(ref_filename.c_str(), std::ios::binary);
            for (int i = 0; i < graph.nodes; ++i)
            {
                fin.read(reinterpret_cast<char*>(&reference_check_bc_values[i]), sizeof(Value));
            }
            fin.close();
        }
    }

    double avg_duty = 0.0;

    // Perform BC
    GpuTimer gpu_timer;

    VertexId start_src;
    VertexId end_src;
    if (src == -1)
    {
        start_src = 0;
        end_src = graph.nodes;   // full BC: enact once per vertex
    }
    else
    {
        start_src = src;
        end_src = src + 1;
    }

    float elapsed = 0.0f;

    for (int iter = 0; iter < iterations; ++iter)
    {
        // Clear accumulated BC values before each timed iteration.
        util::MemsetKernel<<<128, 128>>>
            (csr_problem->data_slices[0]->d_bc_values, (Value)0.0f, (int)graph.nodes);
        gpu_timer.Start();
        for (VertexId i = start_src; i < end_src; ++i)
        {
            util::GRError(csr_problem->Reset(i, bc_enactor.GetFrontierType(), max_queue_sizing), "BC Problem Data Reset Failed", __FILE__, __LINE__);
            util::GRError(bc_enactor.template Enact<Problem>(context, csr_problem, i, max_grid_size), "BC Problem Enact Failed", __FILE__, __LINE__);
        }
        // Scale accumulated BC scores by 0.5 after all sources are done.
        util::MemsetScaleKernel<<<128, 128>>>
            (csr_problem->data_slices[0]->d_bc_values, (Value)0.5f, (int)graph.nodes);
        gpu_timer.Stop();
        elapsed += gpu_timer.ElapsedMillis();
    }
    elapsed /= iterations;

    bc_enactor.GetStatistics(avg_duty);

    // Copy out results
    util::GRError(csr_problem->Extract(h_sigmas, h_bc_values, h_ebc_values), "BC Problem Data Extraction Failed", __FILE__, __LINE__);

    // Verify the result against whichever references were computed.
    if (reference_check_bc_values != NULL) {
        printf("Validity BC Value: ");
        int num_error = CompareResults(h_bc_values, reference_check_bc_values, graph.nodes,
                                       true);
        if (num_error > 0)
            printf("Number of errors occurred: %d\n", num_error);
        printf("\n");
    }
    if (reference_check_ebc_values != NULL) {
        printf("Validity Edge BC Value: ");
        CompareResults(h_ebc_values, reference_check_ebc_values, graph.edges,
                       true);
        printf("\n");
    }
    if (reference_check_sigmas != NULL) {
        printf("Validity Sigma: ");
        CompareResults(h_sigmas, reference_check_sigmas, graph.nodes, true);
        printf("\n");
    }

    // Display Solution
    DisplaySolution(h_sigmas, h_bc_values, graph.nodes);

    printf("GPU BC finished in %lf msec.\n", elapsed);
    if (avg_duty != 0)
        printf("\n avg CTA duty: %.2f%% \n", avg_duty * 100);

    // Cleanup
    if (csr_problem)          delete csr_problem;
    if (reference_sigmas)     free(reference_sigmas);
    if (reference_bc_values)  free(reference_bc_values);
    if (reference_ebc_values) free(reference_ebc_values);
    if (h_sigmas)             free(h_sigmas);
    if (h_bc_values)          free(h_bc_values);
    if (h_ebc_values)         free(h_ebc_values);   // FIX: was leaked in the original

    cudaDeviceSynchronize();
}
/**
 * @brief Parse BC-related command-line options and dispatch to the
 *        instrumented or non-instrumented RunTests instantiation.
 *
 * @param[in] graph   CSR graph to run on
 * @param[in] args    Parsed command-line arguments
 * @param[in] context moderngpu CudaContext
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests(
    Csr<VertexId, Value, SizeT> &graph,
    CommandLineArgs &args,
    CudaContext& context)
{
    // Defaults; each may be overridden by a flag below.
    VertexId    src              = -1;    // -1: graph-type default (full BC)
    std::string src_str;
    std::string ref_filename;
    bool        instrumented     = false; // collect kernel instrumentation?
    int         max_grid_size    = 0;     // 0: leave it up to the enactor
    int         num_gpus         = 1;     // number of GPUs for multi-gpu enactor
    double      max_queue_sizing = 1.0;   // frontier scaling (1.0: n/m element queues)
    int         iterations       = 1;

    g_quick = false;                      // whether to skip ref validation

    args.GetCmdLineArgument("iteration-num", iterations);
    instrumented = args.CheckCmdLineFlag("instrumented");
    args.GetCmdLineArgument("src", src_str);
    args.GetCmdLineArgument("ref-file", ref_filename);
    if (!src_str.empty()) {
        args.GetCmdLineArgument("src", src);
    } else {
        src = -1;
    }
    g_quick = args.CheckCmdLineFlag("quick");
    args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
    g_verbose = args.CheckCmdLineFlag("v");

    // INSTRUMENT is a template parameter, so branch to the right instantiation.
    if (instrumented) {
        RunTests<VertexId, Value, SizeT, true>(
            graph, src, ref_filename, max_grid_size,
            num_gpus, max_queue_sizing, iterations, context);
    } else {
        RunTests<VertexId, Value, SizeT, false>(
            graph, src, ref_filename, max_grid_size,
            num_gpus, max_queue_sizing, iterations, context);
    }
}
/******************************************************************************
 * Main
 ******************************************************************************/
int main(int argc, char** argv)
{
    CommandLineArgs args(argc, argv);

    if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
        Usage();
        return 1;
    }

    // Select the CUDA device and build the moderngpu context.
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    ContextPtr context = mgpu::CreateCudaDevice(dev);

    // Graph-construction params: BC treats the graph as undirected.
    g_undirected = true;

    std::string graph_type = argv[1];
    int flags      = args.ParsedArgc();
    int graph_args = argc - flags - 1;   // positional (non-flag) args after argv[0]

    if (graph_args < 1) {
        Usage();
        return 1;
    }

    //
    // Construct graph and perform search(es)
    //
    if (graph_type == "market") {
        // Matrix-market coordinate-formatted graph file
        typedef int   VertexId;   // node identifier type
        typedef float Value;      // score type
        typedef int   SizeT;      // graph size type

        Csr<VertexId, Value, SizeT> csr(false);  // stream_from_host = false

        if (graph_args < 1) { Usage(); return 1; }
        // With 2-3 positional args the second one names the .mtx file;
        // otherwise the builder reads from stdin.
        char *market_filename =
            (graph_args == 2 || graph_args == 3) ? argv[2] : NULL;
        if (graphio::BuildMarketGraph<false>(
                market_filename,
                csr,
                g_undirected,
                false) != 0)   // no inverse graph
        {
            return 1;
        }

        csr.PrintHistogram();
        fflush(stdout);

        // Run tests
        RunTests(csr, args, *context);
    } else {
        // Unknown graph type
        fprintf(stderr, "Unspecified graph type\n");
        return 1;
    }
    return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
81fa633b8a294a2a0b574529b5ab8943afc95814.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Sum-reduce `input` (N ints) into one partial sum per block, written to
// output[blockIdx.x]. Requires: blockDim.x is a power of two (the host code
// guarantees this via nextPow2) and dynamic shared memory of
// blockDim.x * sizeof(int) bytes.
__global__ void reduceKernel(int *input, int *output, int N)
{
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int sdata[];

    // Each thread loads one element; out-of-range threads contribute 0.
    sdata[tid] = (i < N) ? input[i] : 0;
    // FIX: the barrier (and the whole reduction) used to sit inside
    // `if (i < N)`. __syncthreads() must be reached by every thread in the
    // block; when N is not a multiple of blockDim.x the old code invoked
    // undefined behavior and left the zero-fill stores unfenced.
    __syncthreads();

    // Interleaved tree reduction in shared memory.
    for (int s = 1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0) output[blockIdx.x] = sdata[0];
}
// Round x up to the nearest power of two; a value that is already a power
// of two is returned unchanged. Standard bit-smearing trick: after the OR
// cascade every bit below the highest set bit is 1, so adding one carries
// into the next power of two.
int nextPow2(int x)
{
    int v = x - 1;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
// Multi-level parallel reduction driver: sums N ones on the GPU and reports
// the result, elapsed time, and effective bandwidth.
int main(int argc, char **argv)
{
    // Problem size and launch configuration.
    int N = 4000000;
    int numThreads = 128;
    dim3 block(numThreads, 1, 1);
    dim3 grid((N + numThreads - 1) / numThreads, 1, 1);

    printf("\n======================\n");
    printf("Parallel reduction sum\n");
    printf("======================\n\n");
    printf("Total number of elements to sum: %i\n", N);
    printf("Kernel launch configuration: %i blocks of %i threads\n", grid.x, block.x);

    // Host data: all ones, so the expected sum is exactly N.
    int *data_h = (int*)malloc(N * sizeof(int));
    srand(time(NULL));
    for (int i = 0; i < N; i++) data_h[i] = 1;

    // Device buffers. Two block-sum buffers are used so every reduction
    // level reads one and writes the other. FIX: the original reduced
    // blockSum_d in place (same pointer as input and output), racing one
    // block's write of output[b] against another block's read of input[b].
    int *data_d;
    int *blockSumA_d;
    int *blockSumB_d;
    hipMalloc((void **)&data_d, N * sizeof(int));
    hipMalloc((void **)&blockSumA_d, grid.x * sizeof(int));
    hipMalloc((void **)&blockSumB_d, grid.x * sizeof(int));

    hipMemcpy(data_d, data_h, N * sizeof(int), hipMemcpyHostToDevice);

    // Time the whole multi-level reduction with events.
    float timeGPU;
    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // Level 0: reduce the full input into one partial sum per block.
    printf("Level 0 kernel summing %i elements with %i blocks of %i threads...\n", N, grid.x, block.x);
    hipLaunchKernelGGL(reduceKernel, grid, block, block.x * sizeof(int), 0, data_d, blockSumA_d, N);

    // Levels 1+: keep reducing the partial sums until one value remains,
    // ping-ponging between the two block-sum buffers.
    int remainingElements = grid.x;
    int level = 1;
    int *src_d = blockSumA_d;
    int *dst_d = blockSumB_d;
    while (remainingElements > 1)
    {
        // nextPow2 keeps blockDim a power of two, as the kernel requires.
        int threads = (remainingElements < block.x) ? nextPow2(remainingElements) : block.x;
        int blocks = (remainingElements + threads - 1) / threads;
        printf("Level %i kernel summing %i elements with %i blocks of %i threads...\n", level, remainingElements, blocks, threads);
        hipLaunchKernelGGL(reduceKernel, dim3(blocks), dim3(threads), threads * sizeof(int), 0, src_d, dst_d, remainingElements);
        // This level's output is the next level's input.
        int *tmp = src_d; src_d = dst_d; dst_d = tmp;
        remainingElements = blocks;
        level++;
    }

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeGPU, start, stop);

    // After the trailing swap the final sum lives in src_d (this is
    // blockSumA_d when the loop never ran, i.e. grid.x == 1).
    int sumGPU;
    hipMemcpy(&sumGPU, src_d, sizeof(int), hipMemcpyDeviceToHost);

    printf("result: %i time: %f ms throughput: %.4f GB/s\n", sumGPU, timeGPU, 1.0e-9 * ((double)N*sizeof(int))/(timeGPU/1000));

    // Cleanup. FIX: the original leaked the events and every allocation.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(data_d);
    hipFree(blockSumA_d);
    hipFree(blockSumB_d);
    free(data_h);

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    hipError_t cudaStatus;
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    else return 0;
} | 81fa633b8a294a2a0b574529b5ab8943afc95814.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Sum-reduce `input` (N ints) into one partial sum per block, written to
// output[blockIdx.x]. Requires: blockDim.x is a power of two (the host code
// guarantees this via nextPow2) and dynamic shared memory of
// blockDim.x * sizeof(int) bytes.
__global__ void reduceKernel(int *input, int *output, int N)
{
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int sdata[];

    // Each thread loads one element; out-of-range threads contribute 0.
    sdata[tid] = (i < N) ? input[i] : 0;
    // FIX: the barrier (and the whole reduction) used to sit inside
    // `if (i < N)`. __syncthreads() must be reached by every thread in the
    // block; when N is not a multiple of blockDim.x the old code invoked
    // undefined behavior and left the zero-fill stores unfenced.
    __syncthreads();

    // Interleaved tree reduction in shared memory.
    for (int s = 1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0) output[blockIdx.x] = sdata[0];
}
// Round x up to the nearest power of two; a value that is already a power
// of two is returned unchanged. Standard bit-smearing trick: after the OR
// cascade every bit below the highest set bit is 1, so adding one carries
// into the next power of two.
int nextPow2(int x)
{
    int v = x - 1;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
// Multi-level parallel reduction driver: sums N ones on the GPU and reports
// the result, elapsed time, and effective bandwidth.
int main(int argc, char **argv)
{
    // Problem size and launch configuration.
    int N = 4000000;
    int numThreads = 128;
    dim3 block(numThreads, 1, 1);
    dim3 grid((N + numThreads - 1) / numThreads, 1, 1);

    printf("\n======================\n");
    printf("Parallel reduction sum\n");
    printf("======================\n\n");
    printf("Total number of elements to sum: %i\n", N);
    printf("Kernel launch configuration: %i blocks of %i threads\n", grid.x, block.x);

    // Host data: all ones, so the expected sum is exactly N.
    int *data_h = (int*)malloc(N * sizeof(int));
    srand(time(NULL));
    for (int i = 0; i < N; i++) data_h[i] = 1;

    // Device buffers. Two block-sum buffers are used so every reduction
    // level reads one and writes the other. FIX: the original reduced
    // blockSum_d in place (same pointer as input and output), racing one
    // block's write of output[b] against another block's read of input[b].
    int *data_d;
    int *blockSumA_d;
    int *blockSumB_d;
    cudaMalloc((void **)&data_d, N * sizeof(int));
    cudaMalloc((void **)&blockSumA_d, grid.x * sizeof(int));
    cudaMalloc((void **)&blockSumB_d, grid.x * sizeof(int));

    cudaMemcpy(data_d, data_h, N * sizeof(int), cudaMemcpyHostToDevice);

    // Time the whole multi-level reduction with events.
    float timeGPU;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Level 0: reduce the full input into one partial sum per block.
    printf("Level 0 kernel summing %i elements with %i blocks of %i threads...\n", N, grid.x, block.x);
    reduceKernel<<<grid, block, block.x * sizeof(int)>>>(data_d, blockSumA_d, N);

    // Levels 1+: keep reducing the partial sums until one value remains,
    // ping-ponging between the two block-sum buffers.
    int remainingElements = grid.x;
    int level = 1;
    int *src_d = blockSumA_d;
    int *dst_d = blockSumB_d;
    while (remainingElements > 1)
    {
        // nextPow2 keeps blockDim a power of two, as the kernel requires.
        int threads = (remainingElements < block.x) ? nextPow2(remainingElements) : block.x;
        int blocks = (remainingElements + threads - 1) / threads;
        printf("Level %i kernel summing %i elements with %i blocks of %i threads...\n", level, remainingElements, blocks, threads);
        reduceKernel<<<blocks, threads, threads * sizeof(int)>>>(src_d, dst_d, remainingElements);
        // This level's output is the next level's input.
        int *tmp = src_d; src_d = dst_d; dst_d = tmp;
        remainingElements = blocks;
        level++;
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeGPU, start, stop);

    // After the trailing swap the final sum lives in src_d (this is
    // blockSumA_d when the loop never ran, i.e. grid.x == 1).
    int sumGPU;
    cudaMemcpy(&sumGPU, src_d, sizeof(int), cudaMemcpyDeviceToHost);

    printf("result: %i time: %f ms throughput: %.4f GB/s\n", sumGPU, timeGPU, 1.0e-9 * ((double)N*sizeof(int))/(timeGPU/1000));

    // Cleanup. FIX: the original leaked the events and every allocation.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(data_d);
    cudaFree(blockSumA_d);
    cudaFree(blockSumB_d);
    free(data_h);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaError_t cudaStatus;
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    else return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.