source
stringlengths
3
92
c
stringlengths
26
2.25M
numerics_simpson.c
//****************************************************************************** // MIT License // // Copyright (c) 2022 Tomonobu Inayama // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. //****************************************************************************** #define NUMERICS_SIMPSON_C_ //============================================================================== // Header File Include //============================================================================== #include "numerics_simpson.h" //****************************************************************************** //! \breif Numerical integration using Simpson rule //! \remark //! //! \callgraph //! //! \param[in] integrand : Function Pointer (Integrand) //! \param[in] range : Integration Range //! 
\return Integrated value //****************************************************************************** F64 NumericsSimpson_Integrate(INTEGRAND integrand, const INTEGRATION_RANGE *range) { S32 i; F64 x, dx, dxp2, dxp6; F64 integrated = 0.0; F64 x0, x1, x2; dx = (range->Upper - range->Lower) / (F64)range->Iteration; dxp2 = dx / 2.0; dxp6 = dx / 6.0; #ifdef _OPENMP #pragma omp parallel for reduction(+:integrated) private(x, x0, x1, x2) #endif for (i = 0; i < range->Iteration; i++) { x = range->Lower + dx * (F64)i; x0 = integrand(x); x1 = 4.0 * integrand(x + dxp2); x2 = integrand(x + dx); integrated += dxp6 * (x0 + x1 + x2); } return integrated; } //****************************************************************************** // End of File //******************************************************************************
utils.c
/* File: utils.c */ /* This file is a part of the Corrfunc package Copyright (C) 2015-- Manodeep Sinha (manodeep@gmail.com) License: MIT LICENSE. See LICENSE file under the top-level directory at https://github.com/manodeep/Corrfunc/ */ /* A collection of C wrappers I use. Should be very obvious. The ones that are not obvious have comments before the function itself. Bugs: Please email me manodeep at gmail dot com Ver 1.0: Manodeep Sinha, 2nd April, 2012 Ver 1.1: Manodeep Sinha, 14th June, 2012 - replaced check_string_copy with a "real" wrapper to snprintf. Ver 1.2: Manodeep Sinha, Jan 8, 2012 - replaced print_time with timeval and gettimeofday */ #include<inttypes.h>//defines PRId64 for printing int64_t + includes stdint.h #include<math.h> #include<string.h> #include<limits.h> #include<stdarg.h> #include<ctype.h> #include "macros.h" #include "utils.h" #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time #include <mach/mach_time.h> /* mach_absolute_time -> really fast */ #endif #ifdef _OPENMP #include <omp.h> #endif void get_max_float(const int64_t ND1, const float *cz1, float *czmax) { float max=*czmax; for(int64_t i=0;i<ND1;i++) { if(cz1[i] > max) max = cz1[i]; } *czmax = max; } void get_max_double(const int64_t ND1, const double *cz1, double *czmax) { double max=*czmax; for(int64_t i=0;i<ND1;i++) { if(cz1[i] > max) max = cz1[i]; } *czmax = max; } int setup_bins(const char *fname,double *rmin,double *rmax,int *nbin,double **rupp) { //set up the bins according to the binned data file //the form of the data file should be <rlow rhigh ....> const int MAXBUFSIZE=1000; char buf[MAXBUFSIZE]; FILE *fp=NULL; double low,hi; const char comment='#'; const int nitems=2; int nread=0; *nbin = ((int) getnumlines(fname,comment))+1; *rupp = my_calloc(sizeof(double),*nbin+1); fp = my_fopen(fname,"r"); if(fp == NULL) { free(*rupp); return EXIT_FAILURE; } int index=1; while(1) { if(fgets(buf,MAXBUFSIZE,fp)!=NULL) { nread=sscanf(buf,"%lf %lf",&low,&hi); 
if(nread==nitems) { if(index==1) { *rmin=low; (*rupp)[0]=low; } (*rupp)[index] = hi; index++; } } else { break; } } *rmax = (*rupp)[index-1]; fclose(fp); (*rupp)[*nbin]=*rmax ; (*rupp)[*nbin-1]=*rmax ; return EXIT_SUCCESS; } int setup_bins_double(const char *fname,double *rmin,double *rmax,int *nbin,double **rupp) { //set up the bins according to the binned data file //the form of the data file should be <rlow rhigh ....> const int MAXBUFSIZE=1000; char buf[MAXBUFSIZE]; double low,hi; const char comment='#'; const int nitems=2; int nread=0; *nbin = ((int) getnumlines(fname,comment))+1; *rupp = my_calloc(sizeof(double),*nbin+1); FILE *fp = my_fopen(fname,"r"); if(fp == NULL) { free(*rupp); return EXIT_FAILURE; } int index=1; while(1) { if(fgets(buf,MAXBUFSIZE,fp)!=NULL) { nread=sscanf(buf,"%lf %lf",&low,&hi); if(nread==nitems) { if(index==1) { *rmin=low; (*rupp)[0]=low; } (*rupp)[index] = hi; index++; } } else { break; } } *rmax = (*rupp)[index-1]; fclose(fp); (*rupp)[*nbin]=*rmax ; (*rupp)[*nbin-1]=*rmax ; return EXIT_SUCCESS; } int setup_bins_float(const char *fname,float *rmin,float *rmax,int *nbin,float **rupp) { //set up the bins according to the binned data file //the form of the data file should be <rlow rhigh ....> const int MAXBUFSIZE=1000; char buf[MAXBUFSIZE]; float low,hi; const char comment='#'; const int nitems=2; int nread=0; *nbin = ((int) getnumlines(fname,comment))+1; *rupp = my_calloc(sizeof(float),*nbin+1); FILE *fp = my_fopen(fname,"r"); if(fp == NULL) { free(*rupp); return EXIT_FAILURE; } int index=1; while(1) { if(fgets(buf,MAXBUFSIZE,fp)!=NULL) { nread=sscanf(buf,"%f %f",&low,&hi); if(nread==nitems) { if(index==1) { *rmin=low; (*rupp)[0]=low; } (*rupp)[index] = hi; index++; } } else { break; } } *rmax = (*rupp)[index-1]; fclose(fp); (*rupp)[*nbin]=*rmax ; (*rupp)[*nbin-1]=*rmax ; return EXIT_SUCCESS; } int run_system_call(const char *execstring) { int status=system(execstring); if(status != EXIT_SUCCESS) { fprintf(stderr,"ERROR: executing 
system command: \n`%s'\n\n",execstring); perror(NULL); } return EXIT_FAILURE; } FILE * my_fopen(const char *fname,const char *mode) { FILE *fp = fopen(fname,mode); if(fp == NULL){ fprintf(stderr,"Could not open file `%s'\n",fname); perror(NULL); } return fp;//Could be NULL } /* The following function opens a file (if it already exists) in append mode. If the file doesn't exist, then the function creates one, calls the *header() function [which presumably prints a header to the file] and then returns the file pointer. As usual, you need to be careful with the file you are appending to -> otherwise you might end up with a ginormous file. Usually, I do a system("rm -f filename") before the loop where the file might be created/modified and remove the file from previous runs. */ FILE * my_fopen_carefully(const char *fname,void (*header)(FILE *)) { FILE *fp = fopen(fname,"r");//note I am using fopen and not my_fopen. if(fp == NULL) { /*file does not exist -> open with "w" */ fp = my_fopen(fname,"w");//using my_fopen here. 
if(fp != NULL) { (*header)(fp);/* print the header */ } } else { fclose(fp); fp = my_fopen(fname,"a+");//open with append mode } return fp; } size_t my_fwrite(void *ptr, size_t size, size_t nmemb, FILE *stream) { size_t nwritten; nwritten = fwrite(ptr, size, nmemb, stream); if(nwritten != nmemb){ fprintf(stderr,"I/O error (fwrite) has occured.\n"); fprintf(stderr,"Instead of reading nmemb=%zu, I got nread = %zu \n",nmemb,nwritten); perror(NULL); return -1; } return nwritten; } size_t my_fread(void *ptr, size_t size, size_t nmemb, FILE *stream) { size_t nread; nread = fread(ptr, size, nmemb, stream); if(nread != nmemb) { fprintf(stderr,"I/O error (fread) has occured.\n"); fprintf(stderr,"Instead of reading nmemb=%zu, I got nread = %zu\n",nmemb,nread); perror(NULL); return -1; } return nread; } int my_fseek(FILE *stream, long offset, int whence) { int err=fseek(stream,offset,whence); if(err != 0) { fprintf(stderr,"ERROR: Could not seek `%ld' bytes into the file..exiting\n",offset); perror(NULL); } return err; } // A real wrapper to snprintf that will exit() if the allocated buffer length // was not sufficient. Usage is the same as snprintf int my_snprintf(char *buffer,int len,const char *format, ...) 
{ va_list args; int nwritten=0; va_start(args,format); nwritten=vsnprintf(buffer, (size_t) len, format, args ); va_end(args); if (nwritten > len || nwritten < 0) { fprintf(stderr,"ERROR: printing to string failed (wrote %d characters while only %d characters were allocated)\n",nwritten,len); fprintf(stderr,"Increase `len'=%d in the header file\n",len); return -1; } return nwritten; } int is_big_endian(void) { union { uint32_t i; char c[4]; } e = { 0x01000000 }; return e.c[0]; } void byte_swap(char * const in, const size_t size, char *out) { if(size > 16) { fprintf(stderr,"WARNING: In %s> About to byte_swap %zu bytes but no intrinsic C data-type exists with size larger than 16 bytes", __FUNCTION__, size); } //point to the last byte char *in_char = (char *) in + (size - 1UL); //point to the first byte in output char *out_char = out; //Start filling in from the front in the output string //taking input from the end of the input for(size_t i=0;i<size;i++) { *out_char = *in_char; out_char++; in_char--; } } //Taken from the inter-webs: http://stackoverflow.com/questions/1024389/print-an-int-in-binary-representation-using-c char * int2bin(int a, char *buffer, int buf_size) { buffer += (buf_size - 1); for (int i = 31; i >= 0; i--) { *buffer-- = (a & 1) + '0'; a >>= 1; } return buffer; } /* Can not remember where I (MS) got this from. Fairly sure stackoverflow was involved. 
Finally taken from http://stackoverflow.com/a/6719178/2237582 */ void current_utc_time(struct timespec *ts) { #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time static mach_timebase_info_data_t sTimebaseInfo = {.numer=0, .denom=0}; uint64_t start = mach_absolute_time(); if ( sTimebaseInfo.denom == 0 ) { mach_timebase_info(&sTimebaseInfo); } ts->tv_sec = 0;//(start * sTimebaseInfo.numer/sTimebaseInfo.denom) * tv_nsec; ts->tv_nsec = start * sTimebaseInfo.numer / sTimebaseInfo.denom; #if 0 //Much slower implementation for clock //Slows down the code by up to 4x clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; #endif #else clock_gettime(CLOCK_REALTIME, ts); #endif } /* I like this particular function. Generic replacement for printing (in meaningful units) the actual execution time of a code/code segment. The function call should be like this: --------------------------- struct timeval t_start,t_end; gettimeofday(&t_start,NULL); do_something(); gettimeofday(&t_end,NULL); print_time(t_start,t_end,"do something"); --------------------------- if the code took 220 mins 30.1 secs -> print_time will output `Time taken to execute `do something' = 3 hours 40 mins 30.1 seconds (code can be easily extended to include `weeks' as a system of time unit. 
left to the reader) */ char * get_time_string(struct timeval t0,struct timeval t1) { const size_t MAXLINESIZE = 1024; char *time_string = my_malloc(sizeof(char), MAXLINESIZE); double timediff = t1.tv_sec - t0.tv_sec; double ratios[] = {24*3600.0, 3600.0, 60.0, 1}; if(timediff < ratios[2]) { my_snprintf(time_string, MAXLINESIZE,"%6.3lf secs",1e-6*(t1.tv_usec-t0.tv_usec) + timediff); } else { double timeleft = timediff; size_t curr_index = 0; int which = 0; while (which < 4) { double time_to_print = floor(timeleft/ratios[which]); if (time_to_print > 1) { timeleft -= (time_to_print*ratios[which]); char units[4][10] = {"days", "hrs" , "mins", "secs"}; char tmp[MAXLINESIZE]; my_snprintf(tmp, MAXLINESIZE, "%5d %s",(int)time_to_print,units[which]); const size_t len = strlen(tmp); const size_t required_len = curr_index + len + 1; XRETURN(MAXLINESIZE >= required_len, NULL, "buffer overflow will occur: string has space for %zu bytes while concatenating requires at least %zu bytes\n", MAXLINESIZE, required_len); strcpy(time_string + curr_index, tmp); curr_index += len; } which++; } } return time_string; } void print_time(struct timeval t0,struct timeval t1,const char *s) { double timediff = t1.tv_sec - t0.tv_sec; double ratios[] = {24*3600.0, 3600.0, 60.0, 1}; fprintf(stderr,"Time taken to execute '%s' = ",s); if(timediff < ratios[2]) { fprintf(stderr,"%6.3lf secs",1e-6*(t1.tv_usec-t0.tv_usec) + timediff); } else { double timeleft = timediff; int which = 0; while (which < 4) { double time_to_print = floor(timeleft/ratios[which]); if (time_to_print > 1) { char units[4][10] = {"days", "hrs" , "mins", "secs"}; timeleft -= (time_to_print*ratios[which]); fprintf(stderr,"%5d %s",(int)time_to_print,units[which]); } which++; } } fprintf(stderr,"\n"); } //wrapper for realloc. varname should contain the name of the //variable being re-allocated -> helps debugging in case of a crash. 
void* my_realloc(void *x,size_t size,int64_t N,const char *varname) { void *tmp = realloc(x,N*size); if (tmp==NULL) { fprintf(stderr,"ERROR: Could not reallocate for %"PRId64" elements with %zu size for variable `%s' ..aborting\n",N,size,varname); perror(NULL); } return tmp; } void* my_malloc(size_t size,int64_t N) { void *x = malloc(N*size); if (x==NULL){ fprintf(stderr,"malloc for %"PRId64" elements with %zu bytes failed...\n",N,size); perror(NULL); } return x; } void* my_calloc(size_t size,int64_t N) { void *x = calloc((size_t) N, size); if (x==NULL) { fprintf(stderr,"malloc for %"PRId64" elements with %zu size failed...\n",N,size); perror(NULL); } return x; } //real free. Use only if you are going to check the //pointer variable afterwards for NULL. void my_free(void ** x) { /* my_free(void *x) would also free the memory but then x would be a local variable and the pointer itself in the calling routine could not be set to NULL. Hence the pointer to pointer business. */ if(*x!=NULL) free(*x);//free the memory *x=NULL;//sets the pointer in the calling routine to NULL. } void **matrix_malloc(size_t size,int64_t nrow,int64_t ncol) { void **m = (void **) my_malloc(sizeof(void *),nrow); if(m == NULL) { return NULL; } for(int i=0;i<nrow;i++) { m[i] = (void *) my_malloc(size,ncol); /* Check if allocation failed */ if(m[i] == NULL) { /* Free up all the memory allocated so far */ for(int j=i-1;j>=0;j--) { free(m[j]); } free(m); return NULL; } } return m; } void **matrix_calloc(size_t size,int64_t nrow,int64_t ncol) { void **m = (void **) my_calloc(sizeof(void *),nrow); if(m == NULL) { return m; } for(int i=0;i<nrow;i++) { m[i] = (void *) my_calloc(size,ncol); /* Check if allocation failed */ if(m[i] == NULL) { /* Free up all the memory allocated so far */ for(int j=i-1;j>=0;j--) { free(m[j]); } free(m); return NULL; } } return m; } // Resize a matrix. Returns EXIT_SUCCESS or EXIT_FAILURE. 
// Presently only resizing the last dimension is supported, due to // potential memory leaks when shrinking the first dimension int matrix_realloc(void **matrix, size_t size, int64_t nrow, int64_t ncol){ void *tmp; for(int i = 0; i < nrow; i++){ tmp = my_realloc(matrix[i], size, ncol, "matrix_realloc"); if(tmp == NULL){ return EXIT_FAILURE; } matrix[i] = tmp; } return EXIT_SUCCESS; } void matrix_free(void **m,int64_t nrow) { if(m == NULL) return; for(int i=0;i<nrow;i++) free(m[i]); free(m); } void *** volume_malloc(size_t size,int64_t nrow,int64_t ncol,int64_t nframe) { void ***v = (void ***) my_malloc(sizeof(void **),nrow); if( v == NULL) { return NULL; } for(int i=0;i<nrow;i++) { v[i] = (void *) my_malloc(sizeof(void *),ncol); if(v[i] == NULL) { /* Free up all the memory allocated so far */ for(int jj=i-1;jj>=0;jj--) { for(int k=0;k<ncol;k++) { free(v[jj][k]); } } free(v); return NULL; } for(int j=0;j<ncol;j++) { v[i][j] = my_malloc(size,nframe); if(v[i][j] == NULL) { /* Free up all the memory allocated so far */ /* First free up all columns in this row*/ for(int k=ncol-1;k>=0;k--) { free(v[i][k]); } /* Now free all previous rows with all ncols */ for(int jj=i-1;jj>=0;jj--) { for(int k=0;k<ncol;k++) { free(v[jj][k]); } } free(v); return NULL; } } } return v; } void *** volume_calloc(size_t size,int64_t nrow,int64_t ncol,int64_t nframe) { void ***v = (void ***) my_malloc(sizeof(void **),nrow); if(v == NULL) { return NULL; } for(int i=0;i<nrow;i++) { v[i] = (void *) my_malloc(sizeof(void *),ncol); if(v[i] == NULL) { /* Free up all the memory allocated so far */ for(int jj=i-1;jj>=0;jj--) { for(int k=0;k<ncol;k++) { free(v[jj][k]); } } free(v); return NULL; } for(int j=0;j<ncol;j++) { v[i][j] = my_calloc(size,nframe); if(v[i][j] == NULL) { /* Free up all the memory allocated so far */ /* First free up all columns in this row*/ for(int k=ncol-1;k>=0;k--) { free(v[i][k]); } /* Now free all previous rows with all ncols */ for(int jj=i-1;jj>=0;jj--) { for(int 
k=0;k<ncol;k++) { free(v[j][k]); } } free(v); return NULL; } } } return v; } void volume_free(void ***v,int64_t nrow,int64_t ncol) { for(int i=0;i<nrow;i++) { for(int j=0;j<ncol;j++) { free(v[i][j]); } free(v[i]); } free(v); } int64_t getnumlines(const char *fname,const char comment) { const int MAXLINESIZE = 10000; int64_t nlines=0; char str_line[MAXLINESIZE]; FILE *fp = my_fopen(fname,"rt"); if(fp == NULL) { return -1; } while(1){ if(fgets(str_line, MAXLINESIZE,fp)!=NULL) { /* fgets always terminates the string with a '\0' on a successful read */ char *c = &str_line[0]; while(*c != '\0' && isspace(*c)) { c++; } if(*c != '\0' && *c !=comment) { nlines++; } } else { break; } } fclose(fp); return nlines; } int test_all_files_present(const int nfiles, ...) { /* sets i'th bit for i'th missing file return value is 0 *iff* all files are present and readable. */ int absent=0; va_list filenames; va_start(filenames, nfiles); XASSERT(nfiles <= 31, "Can only test for 31 files simultaneously. nfiles = %d \n",nfiles); for(int i=0;i<nfiles;i++) { const char *f = va_arg(filenames, const char *); FILE *fp = fopen(f,"r"); if(fp == NULL) { absent |= 1; } else { fclose(fp); } absent <<= 1; } va_end(filenames); return absent; } /* int float_almost_equal(const float A, const float B, int maxUlps) */ /* { */ /* /\* MS -- taken from */ /* http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm */ /* *\/ */ /* const int upper_limit_maxulps = 4 * 1024 * 1024; */ /* /\* Make sure maxUlps is non-negative and small enough that the */ /* default NAN won't compare as equal to anything.*\/ */ /* if(maxUlps <= 0 || maxUlps >= upper_limit_maxulps){ */ /* fprintf(stderr,"Error: Comparison between floats should have smaller number of max. units in last place. 
Found maxUlps = %d (max allowed = %d)\n", */ /* maxUlps, upper_limit_maxulps); */ /* return EXIT_FAILURE; */ /* } */ /* int aInt = *(int*)&A; */ /* /\* Make aInt lexicographically ordered as a twos-complement int*\/ */ /* if (aInt < 0) */ /* aInt = 0x80000000 - aInt; */ /* /\* Make bInt lexicographically ordered as a twos-complement int*\/ */ /* int bInt = *(int*)&B; */ /* if (bInt < 0) */ /* bInt = 0x80000000 - bInt; */ /* int intDiff = abs(aInt - bInt); */ /* if (intDiff <= maxUlps) */ /* return 1; */ /* return 0; */ /* } */ /* Directly taken from https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ */ int AlmostEqualRelativeAndAbs_float(float A, float B, const float maxDiff, const float maxRelDiff) { // Check if the numbers are really close -- needed // when comparing numbers near zero. float diff = fabsf(A - B); if (diff <= maxDiff) return EXIT_SUCCESS; A = fabsf(A); B = fabsf(B); float largest = (B > A) ? B : A; if (diff <= largest * maxRelDiff) return EXIT_SUCCESS; return EXIT_FAILURE; } int AlmostEqualRelativeAndAbs_double(double A, double B, const double maxDiff, const double maxRelDiff) { // Check if the numbers are really close -- needed // when comparing numbers near zero. double diff = fabs(A - B); if (diff <= maxDiff) return EXIT_SUCCESS; A = fabs(A); B = fabs(B); double largest = (B > A) ? 
B : A; if (diff <= largest * maxRelDiff) return EXIT_SUCCESS; /* fprintf(stderr,"diff = %e largest * maxRelDiff = %e\n", diff, largest * maxRelDiff); */ return EXIT_FAILURE; } /* #undef __USE_XOPEN2K */ /* A parallel cumulative sum Output convention is: cumsum[0] = 0; cumsum[N-1] = sum(a[0:N-1]); The algorithm is: - Divide the array into `nthreads` chunks - cumsum within each chunk - compute the "offset" for each chunk by summing the cumsum at the tail of all previous chunks - apply the offset */ void parallel_cumsum(const int64_t *a, const int64_t N, int64_t *cumsum){ if (N <= 0){ return; // nothing to do } #ifdef _OPENMP int nthreads = omp_get_max_threads(); #else int nthreads = 1; #endif // We will heuristically limit the number of threads // if there isn't enough work for multithreading to be efficient. // This is also important for the correctness of the algorithm below, // since it enforces nthreads <= N int64_t min_N_per_thread = 10000; if(N/min_N_per_thread < nthreads){ nthreads = N/min_N_per_thread; } if(nthreads < 1){ nthreads = 1; } #ifdef _OPENMP #pragma omp parallel num_threads(nthreads) #endif { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif int64_t cstart = N*tid/nthreads; int64_t cend = N*(tid+1)/nthreads; cumsum[cstart] = cstart > 0 ? a[cstart-1] : 0; for(int64_t c = cstart+1; c < cend; c++){ cumsum[c] = a[c-1] + cumsum[c-1]; } #ifdef _OPENMP #pragma omp barrier #endif int64_t offset = 0; for(int t = 0; t < tid; t++){ offset += cumsum[N*(t+1)/nthreads-1]; } #ifdef _OPENMP #pragma omp barrier #endif if(offset != 0){ for(int64_t c = cstart; c < cend; c++){ cumsum[c] += offset; } } } }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define FOURCC_DX10 0x30315844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #define DDSEXT_DIMENSION_TEX2D 0x00000003 #define 
DDSEXTFLAGS_CUBEMAP 0x00000004 typedef enum DXGI_FORMAT { DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_FLOAT, DXGI_FORMAT_R32G32B32A32_UINT, DXGI_FORMAT_R32G32B32A32_SINT, DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_FLOAT, DXGI_FORMAT_R32G32B32_UINT, DXGI_FORMAT_R32G32B32_SINT, DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_FLOAT, DXGI_FORMAT_R16G16B16A16_UNORM, DXGI_FORMAT_R16G16B16A16_UINT, DXGI_FORMAT_R16G16B16A16_SNORM, DXGI_FORMAT_R16G16B16A16_SINT, DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_FLOAT, DXGI_FORMAT_R32G32_UINT, DXGI_FORMAT_R32G32_SINT, DXGI_FORMAT_R32G8X24_TYPELESS, DXGI_FORMAT_D32_FLOAT_S8X24_UINT, DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, DXGI_FORMAT_X32_TYPELESS_G8X24_UINT, DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM, DXGI_FORMAT_R10G10B10A2_UINT, DXGI_FORMAT_R11G11B10_FLOAT, DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R8G8B8A8_SNORM, DXGI_FORMAT_R8G8B8A8_SINT, DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_FLOAT, DXGI_FORMAT_R16G16_UNORM, DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R16G16_SNORM, DXGI_FORMAT_R16G16_SINT, DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_D32_FLOAT, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R32_SINT, DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_D24_UNORM_S8_UINT, DXGI_FORMAT_R24_UNORM_X8_TYPELESS, DXGI_FORMAT_X24_TYPELESS_G8_UINT, DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UNORM, DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R8G8_SNORM, DXGI_FORMAT_R8G8_SINT, DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_FLOAT, DXGI_FORMAT_D16_UNORM, DXGI_FORMAT_R16_UNORM, DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R16_SNORM, DXGI_FORMAT_R16_SINT, DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_R8_UINT, DXGI_FORMAT_R8_SNORM, DXGI_FORMAT_R8_SINT, DXGI_FORMAT_A8_UNORM, DXGI_FORMAT_R1_UNORM, DXGI_FORMAT_R9G9B9E5_SHAREDEXP, DXGI_FORMAT_R8G8_B8G8_UNORM, DXGI_FORMAT_G8R8_G8B8_UNORM, 
DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC1_UNORM_SRGB, DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC2_UNORM_SRGB, DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM, DXGI_FORMAT_BC3_UNORM_SRGB, DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_UNORM, DXGI_FORMAT_BC4_SNORM, DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_UNORM, DXGI_FORMAT_BC5_SNORM, DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM, DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, DXGI_FORMAT_B8G8R8X8_TYPELESS, DXGI_FORMAT_B8G8R8X8_UNORM_SRGB, DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_UF16, DXGI_FORMAT_BC6H_SF16, DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM, DXGI_FORMAT_BC7_UNORM_SRGB, DXGI_FORMAT_AYUV, DXGI_FORMAT_Y410, DXGI_FORMAT_Y416, DXGI_FORMAT_NV12, DXGI_FORMAT_P010, DXGI_FORMAT_P016, DXGI_FORMAT_420_OPAQUE, DXGI_FORMAT_YUY2, DXGI_FORMAT_Y210, DXGI_FORMAT_Y216, DXGI_FORMAT_NV11, DXGI_FORMAT_AI44, DXGI_FORMAT_IA44, DXGI_FORMAT_P8, DXGI_FORMAT_A8P8, DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_P208, DXGI_FORMAT_V208, DXGI_FORMAT_V408, DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE, DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE, DXGI_FORMAT_FORCE_UINT } DXGI_FORMAT; #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2, extFormat, extDimension, extFlags, extArraySize, extFlags2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 
4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { 
{ { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 
2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 
}, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 
}, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 
31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { 
{ { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 
39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 
52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } }
};

/*
  Per-format single-color lookup tables indexed below; the 5_4 table serves
  both 5-bit channel slots, the 6_4 table the 6-bit (green) slot.
  (presumably indexed by channel of the 5:6:5 endpoint format — TODO confirm
  against the writer's single-color compression path.)
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4
};

/*
  Macros
*/
/* Extract raw 5/6/5-bit channel fields from a packed R5G6B5 word. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)

/* Expand the 5/6-bit fields to 8 bits by replicating the top bits. */
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))

/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)

/* Widen [min,max] so the interval spans at least `steps`, clamped to 0..255. */
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)

/* 3-component dot product (w is ignored). */
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)

/* Broadcast a scalar into all components of a vector. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

/* True when the pixel format's four channel masks match exactly. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)

/*
  Forward declarations
*/
static MagickBooleanType
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);

/* Component-wise sum: destination = left + right. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component into [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of a 3-vector into [0,1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the xyz components of a 4-vector into a 3-vector (w dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b, component-wise. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. 
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Parse the DDS header (and the optional DX10 extension header) from the
  blob into dds_info.  All fields are little-endian; reads are strictly
  sequential, so the blob is left positioned at the first byte of pixel
  data on success.  Returns MagickFalse on any structural mismatch.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field: DDS_HEADER.dwSize must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure: DDS_PIXELFORMAT.dwSize must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return MagickTrue;
}

/*
  Expand one decoded DXT1 4x4 block into the pixel patch at (x,y), clipping
  against the image edges.  Each texel uses a 2-bit index into the four
  palette colors in `colors`.  Returns MagickFalse when a non-zero alpha is
  encountered on an image with no alpha trait — the caller reacts by
  enabling alpha and re-running the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/*
  Decode the mipmap chain into additional images in the list using the
  supplied per-format pixel decoder.  Mipmaps are only present for
  texture/cubemap surfaces; each level halves width and height (min 1).
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* Only skip mipmaps for textures and cube maps */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}

/*
  Derive the four-color DXT palette from the two 5:6:5 endpoint colors.
  When ignoreAlpha is set (DXT3/DXT5) or c0 > c1, colors 2 and 3 are the
  1/3 and 2/3 interpolants; otherwise (DXT1 transparency mode) color 2 is
  the midpoint and color 3 is transparent black (a[3]=255 here is opacity,
  consumed via SetPixelOpacity in SetDXT1Pixels).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Decode a DXT1 (BC1) surface: one 8-byte block (two 16-bit endpoint colors
  + 32 index bits) per 4x4 texel tile.  If a block carries transparency on
  an image without an alpha trait, alpha is enabled and the block redone.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for compressed (DXTn) dds files.  texel_size is
  the byte size of one 4x4 block (8 for DXT1, 16 for DXT3/DXT5); each
  level occupies ceil(w/4)*ceil(h/4) blocks.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /* Only skip mipmaps for textures and cube maps */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/* Decode the base DXT1 image, then either decode or skip its mipmaps. */
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  Decode a DXT3 (BC2) surface: per 4x4 tile, 8 bytes of explicit 4-bit
  alpha followed by a DXT1-style color block (alpha interpolation of the
  color palette is disabled via ignoreAlpha).
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode the base DXT3 image, then either decode or skip its mipmaps. */
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode a DXT5 (BC3) surface: per 4x4 tile, two 8-bit alpha endpoints plus
  48 bits of 3-bit alpha indices, followed by a DXT1-style color block.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): 2 endpoints + 48 index bits */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value: a0>a1 selects the 8-value ramp,
                 otherwise the 6-value ramp with explicit 0 and 255 */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode the base DXT5 image, then either decode or skip its mipmaps. */
static MagickBooleanType
ReadDXT5(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGBPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { Quantum *q; ssize_t x, y; unsigned short color; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 8 || dds_info->extFormat == DXGI_FORMAT_R8_UNORM) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16 || dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 32 || dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static 
MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (dds_info->pixelformat.rgb_bitcount == 8 || dds_info->extFormat == DXGI_FORMAT_R8_UNORM) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { 
alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM) alphaBits=1; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16 || dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM || IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000)) { SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); 
SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { const char *option; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; Image *image; MagickBooleanType status, cubemap, volume, read_mipmaps; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse; image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } case FOURCC_DX10: { if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D) { 
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } switch (dds_info.extFormat) { case DXGI_FORMAT_R8_UNORM: { compression = NoCompression; alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; break; } case DXGI_FORMAT_B5G6R5_UNORM: { compression = NoCompression; alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; break; } case DXGI_FORMAT_B5G5R5A1_UNORM: { compression = NoCompression; alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; break; } case DXGI_FORMAT_B8G8R8A8_UNORM: { compression = NoCompression; alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; break; } case DXGI_FORMAT_R8G8B8A8_UNORM: { compression = NoCompression; alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; break; } case DXGI_FORMAT_B8G8R8X8_UNORM: { compression = NoCompression; alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; break; } case DXGI_FORMAT_BC1_UNORM: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case DXGI_FORMAT_BC2_UNORM: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case DXGI_FORMAT_BC3_UNORM: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown format */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP) cubemap = MagickTrue; num_images = dds_info.extArraySize; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... 
thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); option=GetImageOption(image_info,"dds:skip-mipmaps"); if (IsStringFalse(option) != MagickFalse) read_mipmaps=MagickTrue; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression=compression; image->columns=dds_info.width; image->rows=dds_info.height; image->storage_class=DirectClass; image->endian=LSBEndian; image->depth=8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image,exception); status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception); if (status == MagickFalse) { (void) CloseBlob(image); if (n == 0) return(DestroyImageList(image)); 
return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = 
source[map[i]]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. 
% */ static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const 
DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); 
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) 
unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = 
d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColorLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; 
i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) 
  WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  /* Pack the sixteen 2-bit indices, four texels per byte, row by row. */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}

/*
  Compress one 4x4 tile's color values to a DXT color block.  Fits the
  points to a line through color space (principal component of the
  weighted covariance), then quantizes with either the fast range fit or
  the slower cluster fit.  NOTE(review): covariance[16] is larger than the
  6 symmetric entries ComputeWeightedCovariance fills - presumably headroom,
  harmless.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end, principle, start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  /* Uniform metric: all channels weighted equally. */
  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}

/*
  Encode a tile whose pixels are all one color: look up the optimal
  pre-computed 565 endpoints for that color and give every texel the same
  palette index.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start, end;

  ssize_t
    i;

  unsigned char
    color[3], index, indexes[16], indices[16];

  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}

/* Compress the image as DXT1 or DXT5, one 4x4 tile at a time. */
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;

  ssize_t
    i, y, bx, by;

  const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point, points[16];

      size_t
        count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4,
rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { const Quantum *p; ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) 
NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if 
/* (continuation of WriteMipmaps: dispose of or chain the resized level) */
           (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Fast mipmaps: next level is resized from this one. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }

    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);

  return(status);
}

/*
  WriteDDSInfo: emit the 128-byte DDS file header ("DDS " magic followed by
  the 124-byte DDS_HEADER, which embeds the 32-byte DDS_PIXELFORMAT), all
  fields little-endian.  (Continues on the next chunk.)
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  ssize_t
    i;

  unsigned int
    format, caps, flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  /* Compressed images record a linear size; uncompressed record a pitch. */
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);            /* dwSize of DDS_HEADER */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);           /* dwDepth (unused) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  (void)
/* (continuation of WriteDDSInfo: dwReserved1[11] carries a software tag) */
       memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);

  /* DDS_PIXELFORMAT: dwSize (always 32), dwFlags, then format details. */
  (void) WriteBlobLSBLong(image,32);
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++)   /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);       /* dwFourCC unused */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit BGRA channel masks. */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit BGR channel masks, no alpha mask. */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++)   /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}

/*
  WriteDDSImage: entry point of the DDS encoder.  Chooses pixel format and
  compression from the magick name, compression setting and "dds:*" image
  options, computes the mipmap count, writes the header (unless "dds:raw"),
  the base image and the mipmap chain.  (Continues on the next chunk.)

  Returns MagickTrue on success.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression, columns, maxMipmaps, mipmaps, pixelFormat, rows;

  MagickBooleanType
    clusterFit, fromlist, status, weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);

  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);

  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);

  /* Defaults: DXT5 for images with alpha, DXT1 otherwise. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
/* (continuation of WriteDDSImage: resolve format and compression options) */
    compression=FOURCC_DXT1;

  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;

  /* "dds:compression" overrides the settings above. */
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }

  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          /* Alpha weighting only makes sense for DXT5. */
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }

  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      /* "fromlist": take mipmap levels from the trailing image list. */
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }

  /* Auto-generate mipmaps only for power-of-two dimensions. */
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          /* Count halvings until 1x1 (or the requested cap). */
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }

  /* "dds:raw" suppresses the header and implies no mipmaps. */
  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;

  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);

  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
       mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);

  (void) CloseBlob(image);
  return(MagickTrue);
}
/* ==== Concatenated-file boundary: the following content is from Sema.h ==== */
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/SemaCheerp.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" 
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef 
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } 
namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. 
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// when \p Tok is not the token the stored expectation was recorded for.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope {
  Sema &S;
  /// Pushes \p DC as the current context for the lifetime of this object.
  Sema::ContextRAII SavedContext;
  /// True once addContextNote() has pushed a code-synthesis context that
  /// the destructor must pop.
  bool PushedCodeSynthesisContext = false;

public:
  /// Enter \p DC (a FunctionDecl or ObjCMethodDecl), push a function scope
  /// and a potentially-evaluated expression context, and mark the function
  /// as about to receive a body.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record \p UseLoc as the point that triggered this synthesis, so
  /// diagnostics emitted during synthesis carry a "while defining..." note.
  /// Must be called at most once.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  /// Unwind everything pushed by the constructor (and addContextNote, if
  /// called), in reverse order.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

/// Resolver for identifier lookups during semantic analysis.
IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields (see the UnevaluatedAbstract evaluation context
/// below, e.g. a SIZE operator in MS-style inline assembly).
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  /// Expressions whose ODR-use status was deferred when this context was
  /// entered, to be restored or resolved when it is popped.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Expressions in this context that may be dereferences of 'noderef'
  /// pointers, pending a check that their address is eventually taken.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  /// True for any of the three unevaluated-operand contexts.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  /// The resolved method (if any) plus a 2-bit Kind tag packed into the
  /// pointer's low bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// Wrap \p MD, classifying a deleted member as NoMemberOrDeleted.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ?
             NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
/// a precomputed FoldingSetNodeID.
class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

/// Bump-pointer arena used for Sema-owned allocations.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

// Pair of instance/factory method lists for a selector (see MethodPool).
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

/// A record paired (in the pointer's low bits) with which of its special
/// members is meant.
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  /// Snapshot of S.FPFeatures taken at construction, restored on destruction.
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions     &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  /// The diagnostic ID, re-dispatched to Sema::EmitCurrentDiagnostic
  /// on destruction.
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,

  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,

  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing.  Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  /// Sema instance that owns the scope-info cache; used by operator().
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

/// Innermost function scope, or null when not inside a function.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

// Builders for semantic type construction; each checks the operation per
// the language rules and diagnoses at the given location on failure.
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
                           SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
                            SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. 
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
/// Fast path inline; hasVisibleDeclarationSlow can also report (via
/// \p Modules) which modules would need to be imported.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
/// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// A type is complete iff the (diagnostic-free) completeness check succeeds.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}

// Require that \p T be complete at \p Loc; the overloads differ only in how
// the failure diagnostic is specified. All return true on failure.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  // Bind the extra diagnostic arguments and delegate to the diagnoser form.
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);

// As RequireCompleteType, but for the type of an expression.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

// Require that \p T be a literal type at \p Loc; same diagnostic pattern.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
/// Discriminator for NameClassification below.
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// Tagged union describing what a name resolved to; only the member selected
/// by Kind is meaningful. Constructed via the implicit converting
/// constructors or the named factory functions.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;        // valid iff Kind == NC_Expression
  TemplateName Template;  // valid iff Kind is one of the *Template kinds
  ParsedType Type;        // valid iff Kind == NC_Type

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // The keyword itself is not stored; only the kind matters.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the template classification kinds onto TemplateNameKind;
  /// asserts (unreachable) for non-template kinds.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
/// Returns true when E names a declaration (or dependent name, in which case
/// \p Dependent is set) without explicit template arguments — i.e. something
/// that could plausibly be followed by a '<' template argument list.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

// Declarator processing (SemaDecl.cpp).
Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation(),
                          SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);

// Shadowing-declaration diagnostics.
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is
/// about to be modified, refers to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

// Typedef / variable / function declarator processing.
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};

bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);

// Diagnostics for virtual methods hidden by overloads in a derived class.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);

// Parameter declaration and default-argument handling.
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Initializer handling.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void
ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);

// Function-definition begin/end callbacks.
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
///        could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

// 'export' block handling.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

/// How a tag name is being used at its point of appearance.
enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);

// Field (and MS property) handling.
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl
*CheckFieldDecl(DeclarationName Name, QualType T,
                TypeSourceInfo *TInfo,
                RecordDecl *Record, SourceLocation Loc, bool Mutable,
                Expr *BitfieldWidth,
                InClassInitStyle InitStyle,
                SourceLocation TSSL,
                AccessSpecifier AS, NamedDecl *PrevDecl,
                Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,

  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);

CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

// Objective-C ivar handling.
void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

// Enumeration handling.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

// Declaration-context management.
DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. 
An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr 
*mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool 
MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void 
HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. 
CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. 
virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. 
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      // Remember this (identifier, location) pair so the same failed
      // correction is not retried.
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool.
/// This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Holder for a fully-checked ("full") expression; may wrap a null
  /// expression. Constructed only via Sema::MakeFullExpr and friends.
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    // Use the expression's own location when one is available.
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult 
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, 
SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Save the delayed-diagnostics state and begin collecting diagnostics for
/// the declaration being parsed into \p pool.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Enter an undelayed-diagnostics context for parsing a class; restored by
/// PopParsingClass with the returned state.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult 
BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op, bool isCheerpSafe = false); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

// NOTE(review): the next two doc comments were previously swapped relative to
// the declarations they precede; they are corrected here (the "whether the
// parameter will be const" clause applies to the copy constructor only).

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens, bool isCheerpSafe); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt = nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool noInit); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
                                  const CXXRecordDecl *RD,
                                  bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

/// Attach the parsed mem-initializer list to a constructor declaration.
void ActOnMemInitializers(Decl *ConstructorDecl,
                          SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg 
TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// Outcome of an access-control check, returned by the Check*Access
/// members below.
// NOTE(review): AR_dependent/AR_delayed presumably cover dependent contexts
// and deferred checks respectively — confirm against the implementation.
enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                                CXXRecordDecl *DecomposedClass,
                                                DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl,
                        CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Selects the kind of entity whose type was found to be abstract.
// NOTE(review): the values look like diagnostic %select indices (with
// AbstractNone = -1 as a "no selection" sentinel) — confirm against the
// corresponding diagnostic definitions.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);

/// Convenience overload: bundles the diagnostic ID and its arguments into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists,
    bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);

DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

void NoteAllFoundTemplates(TemplateName Name);

QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                    TemplateTy Template, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
                    bool IsCtorOrDtorName = false, bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool 
CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> 
&Converted,
    bool &HasDefaultArg);

/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,

  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};

bool TemplateParameterListsAreEqual(
    TemplateParameterList *New,
    TemplateParameterList *Old,
    bool Complain,
    TemplateParameterListEqualKind Kind,
    SourceLocation TemplateArgLoc = SourceLocation());

bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);

/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                             const CXXScopeSpec &SS,
                             const IdentifierInfo &II,
                             SourceLocation IdLoc);

/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                             const CXXScopeSpec &SS,
                             SourceLocation TemplateLoc,
                             TemplateTy TemplateName,
                             IdentifierInfo *TemplateII,
                             SourceLocation TemplateIILoc,
                             SourceLocation LAngleLoc,
                             ASTTemplateArgsPtr TemplateArgs,
                             SourceLocation RAngleLoc);

QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                           SourceLocation KeywordLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           const IdentifierInfo &II,
                           SourceLocation IILoc);

TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                  SourceLocation Loc,
                                                  DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
    TemplateParameterList *Params);

std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgument *Args,
                                unsigned NumArgs);

// Concepts
Decl *ActOnConceptDefinition(
    Scope *S,
    MultiTemplateParamsArg TemplateParameterLists,
    IdentifierInfo *Name, SourceLocation NameLoc,
    Expr *ConstraintExpr);

//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//

/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,

  /// The base type of a class type.
  UPPC_BaseType,

  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// The type of a data member.
  UPPC_DataMemberType,

  /// The size of a bit-field.
  UPPC_BitFieldWidth,

  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression,
  UPPC_Block
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc,
    UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). 
TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
///
/// The constructor snapshots four pieces of Sema state
/// (NumSFINAEErrors, InNonInstantiationSFINAEContext,
/// AccessCheckingSFINAE, and the "last diagnostic ignored" flag); the
/// destructor restores all four, so errors recorded inside the scope are
/// observable only via hasErrorOccurred().
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // Entering a SFINAE region that is not itself inside an existing
    // SFINAE context: record that fact for the duration of the trap.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of state saved in the constructor.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;

  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;

  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that isolates the local pending-instantiation queue: the
/// constructor swaps it out, perform() drains local-only instantiations
/// queued since then, and the destructor (after asserting the queue was
/// drained) swaps the saved queue back.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Entries must be set in increasing index order (the assert requires
  /// index >= the number of entries recorded so far); skipped slots are
  /// filled with default-constructed infos by the resize.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    // Track whether any entry differs from the default, so that
    // getPointerOrNull() can omit an all-default array entirely.
    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null when every recorded
  /// entry is the default value.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *
SubstParmVarDecl(ParmVarDecl *D,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 int indexAdjustment, Optional<unsigned> NumExpansions,
                 bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. 
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. 
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
'#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragme omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. 
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. 
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. 
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. 
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. 
CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. 
This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. 
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. 
IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. 
IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. 
If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  // If true, suppress all diagnostics; callers only want the
  // success/failure result of the verification.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Called when the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Called when the expression is not a formal ICE but could still be
  /// folded to a constant.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
// Nesting depth of the pragma forcing __host__ __device__; see
// Push/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    // Route the streamed value either into the immediate diagnostic or into
    // the deferred PartialDiagnostic recorded on the function.
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in CUDA device code.
///   if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  // Any preference other than CFP_Never means the combination is callable.
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCheerpBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); void CheckCheerpAttributesConsistency(NamedDecl* New, NamedDecl* Old, bool newIsDefinition); // CHEERP: Inject asmjs/genericjs attribute if required void MaybeInjectCheerpModeAttr(Decl* D); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType 
CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E);

/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

/// Type information recorded for a single registered type-tag magic value.
struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  /// If true, the tagged argument is expected to be a null pointer
  /// constant. (NOTE(review): inferred from the flag's name and the
  /// matching RegisterTypeTagForDatatype parameter; confirm against
  /// CheckArgumentWithTypeTag's implementation.)
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

// Lazily-created identifiers; mutable so const accessors can cache them.
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
// CHEERP: Per-Sema state used by the Cheerp extensions.
cheerp::CheerpSemaData cheerpSemaData;

/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
/// RAII object that stashes the pending parsed-class state (delayed
/// exception-spec checks and delayed DLL-export classes) on construction
/// and restores it on destruction. The destructor asserts that no new
/// pending work was left behind in the meantime.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedDllExportClasses.empty() &&
           "there shouldn't be any pending delayed DLL export classes");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;
  decltype(DelayedDllExportClasses) SavedDllExportClasses;

  // Exchanges the live Sema state with the saved copies; called from both
  // the constructor and the destructor so a second swap restores it.
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
    SavedDllExportClasses.swap(S.DelayedDllExportClasses);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. 
enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; // CHEERP: Utility function for checking if a type can be used in the asmjs section bool isAsmJSCompatible(QualType qt, bool allowAnyref) { const Type* pt = qt.getNonReferenceType().getTypePtr(); while (pt->isAnyPointerType()) pt = pt->getPointeeType().getTypePtr(); if (pt->isArrayType()) pt = pt->getArrayElementTypeNoTypeQual(); TagDecl* pd = pt->getAsTagDecl(); if (!pd) return true; bool client = pd->getDeclContext()->isClientNamespace(); return (client&&allowAnyref) || pd->isEnum() || pd->hasAttr<AsmJSAttr>(); } static AsmJSAttr* getAsmJSAttr(QualType qt) { const Type* pt = qt.getNonReferenceType().getTypePtr(); while (pt->isAnyPointerType()) pt = pt->getPointeeType().getTypePtr(); if (pt->isArrayType()) pt = pt->getArrayElementTypeNoTypeQual(); TagDecl* pd = pt->getAsTagDecl(); assert(pd && "Not a TagDecl"); assert(pd->hasAttr<AsmJSAttr>() && "No AsmJSAttr"); return pd->getAttr<AsmJSAttr>(); } static GenericJSAttr* getGenericJSAttr(QualType qt) { const Type* pt = qt.getNonReferenceType().getTypePtr(); while (pt->isAnyPointerType()) pt = pt->getPointeeType().getTypePtr(); if (pt->isArrayType()) pt = pt->getArrayElementTypeNoTypeQual(); TagDecl* pd = pt->getAsTagDecl(); assert(pd && "Not a TagDecl"); assert(pd->hasAttr<GenericJSAttr>() && "No GenericJSAttr"); return pd->getAttr<GenericJSAttr>(); } // CHEERP: Utility function for checking if a type is an asmjs value type static bool isAsmJSValue(QualType qt) { const Type* t = qt->isArrayType() ? qt->getArrayElementTypeNoTypeQual() : qt.getTypePtr(); TagDecl* pd = t->getAsTagDecl(); return pd && pd->hasAttr<AsmJSAttr>(); } // CHEERP: Utility function for checking if a type is a genericjs value type static bool isGenericJSValue(QualType qt) { const Type* t = qt->isArrayType() ? 
qt->getArrayElementTypeNoTypeQual() : qt.getTypePtr(); TagDecl* pd = t->getAsTagDecl(); return pd && pd->hasAttr<GenericJSAttr>(); } // CHEERP: Disallow calls to asmjs functions with pointer to basic type parameters from genericjs // and calls to functions with pointer to function parameters both ways void CheckCheerpFFICall(const FunctionDecl* Parent, const FunctionDecl* FDecl, const SourceLocation Loc, const llvm::ArrayRef<Expr*> Args); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
backprop.c
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ /* Copyright (c)2008-2011 University of Virginia All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted without royalty fees or other restrictions, provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Virginia, the Dept. of Computer Science, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF VIRGINIA OR THE SOFTWARE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ {\ register char *_to,*_from;\ register int _i,_l;\ _to = (char *)(to);\ _from = (char *)(from);\ _l = (len);\ for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float) rand() / (float) BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(float x) { float m; //x = -x; //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120; //return(1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(int n) { float *new_t; new_t = (float *) malloc ((unsigned) (n * sizeof (float))); if (new_t == NULL) { fprintf(stderr, "ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new_t); } /*** Allocate 2d array of floats ***/ float **alloc_2d_dbl(int m, int n) { int i; float **new_t; new_t = (float **) malloc ((unsigned) (m * sizeof (float *))); if (new_t == NULL) { fprintf(stderr, "ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { new_t[i] = alloc_1d_dbl(n); } return (new_t); } void bpnn_randomize_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float) rand()/RAND_MAX; // w[i][j] = dpn1(); } } } void bpnn_randomize_row(float *w, int m) { int i; for (i = 0; i <= m; i++) { //w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } void bpnn_zero_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } void bpnn_initialize(int seed) { fprintf(stderr, "Random number generator seed: %d\n", 
seed); srand(seed); } BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); if (newnet == NULL) { fprintf(stderr, "BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(BPNN *net) { int n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); for (i = 0; i <= n1; i++) { free((char *) net->input_weights[i]); free((char *) net->input_prev_weights[i]); } free((char *) net->input_weights); free((char *) net->input_prev_weights); for (i = 0; i <= n2; i++) { free((char *) net->hidden_weights[i]); free((char *) net->hidden_prev_weights[i]); } free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). 
***/ BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2) { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err) { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err) { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw) { float new_dw; int k, j; ly[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 
0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } void bpnn_feedforward(BPNN *net) { int in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. ***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. 
***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(BPNN *net, char *filename) { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add// FILE *pFile; pFile = fopen( filename, "w+" ); /////// /* if ((fd = creat(filename, 0644)) == -1) { fprintf(stderr, "BPNN_SAVE: Cannot create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; fprintf(stderr, "Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *) &n1, sizeof(int)); //write(fd, (char *) &n2, sizeof(int)); //write(fd, (char *) &n3, sizeof(int)); fwrite( (char *) &n1 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n2 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n3 , sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1+1) * (n2+1) * sizeof(float)); fwrite( mem , (unsigned)(sizeof(float)), (unsigned) ((n1+1) * (n2+1) * sizeof(float)) , pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2+1) * (n3+1) * sizeof(float)); fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1) * sizeof(float)) , pFile); free(mem); fclose(pFile); return; } BPNN *bpnn_read(char *filename) { char *mem; BPNN *new_t; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } fprintf(stderr, "Reading 
'%s'\n", filename); //fflush(stdout); read(fd, (char *) &n1, sizeof(int)); read(fd, (char *) &n2, sizeof(int)); read(fd, (char *) &n3, sizeof(int)); new_t = bpnn_internal_create(n1, n2, n3); fprintf(stderr, "'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3); fprintf(stderr, "Reading input weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); read(fd, mem, (n1+1) * (n2+1) * sizeof(float)); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { fastcopy(&(new_t->input_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); fprintf(stderr, "Done\nReading hidden weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); read(fd, mem, (n2+1) * (n3+1) * sizeof(float)); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { fastcopy(&(new_t->hidden_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); close(fd); fprintf(stderr, "Done\n"); //fflush(stdout); bpnn_zero_weights(new_t->input_prev_weights, n1, n2); bpnn_zero_weights(new_t->hidden_prev_weights, n2, n3); return (new_t); }
cpl_msg.c
/* * This file is part of the ESO Common Pipeline Library * Copyright (C) 2001-2017 European Southern Observatory * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <stdlib.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <signal.h> #include <time.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #ifdef HAVE_TERMIOS_H # include <termios.h> #else # ifdef HAVE_TERMIO_H # include <termio.h> # else # error Neither termios.h nor termio.h found! # endif #endif #ifdef HAVE_STROPTS_H # include <stropts.h> #endif #if !defined(HAVE_STROPTS_H) || defined(HAVE_TERMIOS_H) || \ defined(GWINSZ_IN_SYS_IOCTL) # ifdef HAVE_SYS_IOCTL_H # include <sys/ioctl.h> # else # error Cannot find header file for ioctl()! # endif #endif #undef CPL_HAVE_STREAM_DUPLICATION #undef CPL_HAVE_WINDOW_RESIZING #ifndef __STRICT_ANSI__ /* gcc -ansi and gcc -std=... 
enters here */ #if defined HAVE_FILENO && defined HAVE_FDOPEN && defined HAVE_DUP #if defined HAVE_DECL_FILENO && defined HAVE_DECL_FDOPEN && defined HAVE_DECL_DUP #define CPL_HAVE_STREAM_DUPLICATION #if defined HAVE_SIGACTION && defined HAVE_SIGEMPTYSET #define CPL_HAVE_WINDOW_RESIZING #endif #endif #endif #endif #ifdef _OPENMP # include <omp.h> #endif #include <cxutils.h> #include <cxmessages.h> #include <cpl_error_impl.h> #include <cpl_msg.h> #include <cpl_memory.h> /** * @defgroup cpl_msg Messages * * This module provides functions to display and log messages. * The following operations are supported: * * - Enable messages output to terminal or to log file. * - Optionally adding informative tags to messages. * - Setting width for message line wrapping. * - Control the message indentation level. * - Filtering messages according to their severity level. * * To activate and deactivate the messaging system, the functions * @c cpl_msg_init() and @c cpl_msg_stop() need to be used. However, * since they are called anyway by the functions @c cpl_init() and * @c cpl_end(), there is generally no need to call them explicitly, * and starting from CPL 2.1 they are deprecated. * These functions would typically be called at the beginning and at * the end of a program. An attempt to use an uninitialised messaging * system would generate a warning message. More functions may also * be used to configure the messaging system, and here is an example * of a possible initialisation: * * @code * ... * cpl_msg_set_time_on(); * cpl_msg_set_component_on(); * cpl_msg_set_domain("Source detection"); * cpl_msg_set_domain_on(); * cpl_msg_set_level(CPL_MSG_ERROR); * cpl_msg_set_log_level(CPL_MSG_DEBUG); * ... * @endcode * * The functions of these kind, are meant to configure the messaging * system, defining its "style", once and for all. For this reason * such functions are not supposed to be called from threads. 
* Three different tags may be attached to any message: @em time, * @em domain, and @em component. The @em time tag is the time * of printing of the message, and can optionally be turned * on or off with the functions @c cpl_msg_set_time_on() and * @c cpl_msg_set_time_off(). The @em domain tag is an identifier * of the main program running (typically, a pipeline recipe), * and can be optionally turned on or off with the functions * @c cpl_msg_set_domain_on() and @c cpl_msg_set_domain_off(). * Finally, the @em component tag is used to identify a component * of the program running (typically, a function), and can be optionally * turned on or off with the functions @c cpl_msg_set_component_on() * and @c cpl_msg_set_component_off(). As a default, none of the * above tags are attached to messages sent to terminal. However, * all tags are always used in messages sent to the log file. A * further tag, the @em severity tag, can never be turned off. * This tag depends on the function used to print a message, that * can be either @c cpl_msg_debug(), @c cpl_msg_info(), @c cpl_msg_warning(), * or @c cpl_msg_error(). The @em time and @em severity tags are * all prepended to any message, and are not affected by the message * indentation controlled by the functions @c cpl_msg_indent(), * @c cpl_msg_indent_more(), @c cpl_msg_indent_less(), and * @c cpl_msg_set_indentation(). * * @par Synopsis: * @code * #include <cpl_msg.h> * @endcode */ /**@{*/ /* * This is the length for a time string in ISO 8601 format */ #define TIME_ISO8601_LENGTH (20) /* * This is the max length for text lines that are written to the log file. * It is also the max length for text lines sent to the terminal, in case * the window size cannot be determined by the appropriate call to ioctl(). * If this number is zero or negative, then lines are not splitted. 
*/ #define DEFAULT_WIDTH (-1) /* * Strings used for the severity field in the message: */ #define ERROR_STRING "[ ERROR ] " #define WARNING_STRING "[WARNING] " #define INFO_STRING "[ INFO ] " #define DEBUG_STRING "[ DEBUG ] " inline static void cpl_msg_out(cpl_msg_severity, const char *, int, const char *, va_list) CPL_ATTR_PRINTF(4,0); static const char default_component[] = "<empty field>"; static const char default_format[] = "<empty message>"; static cpl_msg_severity log_min_level = CPL_MSG_OFF; static cpl_msg_severity term_min_level = CPL_MSG_INFO; static int time_tag = 0; static int threadid_tag = 0; static int domain_tag = 0; static int component_tag = 0; static int msg_init = 0; static char domain[CPL_MAX_DOMAIN_NAME] = "Undefined domain"; static char logfile_name[CPL_MAX_LOGFILE_NAME] = ".logfile"; static FILE *logfile = NULL; static int page_width = DEFAULT_WIDTH; static const int log_width = DEFAULT_WIDTH; static int indent_step = 2; static int indent_value = 0; static int overwrite = 0; #ifdef _OPENMP #pragma omp threadprivate(indent_value, overwrite) #endif static FILE *msg_stdout; static FILE *msg_stderr; #ifdef CPL_HAVE_STREAM_DUPLICATION static int out_stream; #ifdef CPL_HAVE_WINDOW_RESIZING static struct sigaction act, oact; #endif #endif static cx_print_func default_printer; static cx_print_func default_error; /* * @brief * Ensure system initialisation if it was forgotten. * * @return Nothing. * * This private function is used to call cpl_msg_init() if it was not * called by the user. */ inline static void _cpl_msg_init(const char *component) { if (msg_init == 0) { if (cpl_msg_init() == CPL_ERROR_NONE) { cpl_msg_warning("CPL messaging", "The CPL messaging function %s() was called before the system " "had been initialised. 
Please call the function cpl_init() " "before attempting to use any CPL function.", component); } else { fprintf(stderr, "%s\n", cpl_error_get_message()); fprintf(stderr, "SEVERE ERROR: The CPL messaging system has " "not been initialised, and this may cause undefined program " "behaviour: please call the function cpl_init() before " "attempting to use any CPL function."); } msg_init = 1; } } /* * @brief * Get current date and time in ISO8601 format. * @param * String of size at least TIME_ISO8601_LENGTH * * @return void * * This private function just returns the current time in ISO8601 format. */ static void _cpl_timestamp_iso8601(char *timestamp) { char _timestamp[TIME_ISO8601_LENGTH]; const time_t seconds = time(NULL); strncpy(_timestamp, "0000-00-00T00:00:00", TIME_ISO8601_LENGTH); if (seconds != ((time_t)-1)) { struct tm time_of_day; if (localtime_r(&seconds, &time_of_day) != NULL) { int _errno = errno; strftime(_timestamp, TIME_ISO8601_LENGTH, "%Y-%m-%dT%H:%M:%S", &time_of_day); /* * POSIX does not specify errno settings for strftime. If there * is any, discard it! */ errno = _errno; } } strncpy(timestamp, _timestamp, TIME_ISO8601_LENGTH); return; } #ifdef CPL_HAVE_STREAM_DUPLICATION /* * @brief * Signal handler for signal @c SIGWINCH * * @param i Dummy argument (not used!) * * @return Nothing. * * This private function accomodates the output line width of the messaging * subsystem to the new window size on arrival of the signal @c SIGWINCH. */ static void _cpl_change_width(int i) { struct winsize win; CPL_UNUSED(i); if (ioctl(out_stream, TIOCGWINSZ, &win) < 0 || win.ws_col < 1) page_width = DEFAULT_WIDTH; else page_width = win.ws_col; } #endif /* * @brief * Handler for printing to standard output. * * @param String to print. * * @return Nothing. * * This private function is used by cx_print() to write any message * to standard output. 
*/ static void _cpl_print_out(const cxchar *message) { fputs(message, msg_stdout); fflush(msg_stdout); } /* * @brief * Handler for printing to standard error. * * @param String to print. * * @return Nothing. * * This private function is used by cx_printerr() to write any message * to standard output. */ static void _cpl_print_err(const cxchar *message) { fputs(message, msg_stderr); fflush(msg_stderr); } /* * @brief * Split a string according to the max allowed page width. * * @param split Processed output string at least of size CPL_MAX_MSG_LENGTH * @param s Input string to be processed. * @param blanks Number of blanks to be inserted at every split point. * @param width Max number of characters between split points. * * @return Pointer to the modified character string, or if the width is less * than one, pointer to the unmodified input string. * * This private function is used for splitting a string avoiding to exceed * a maximum width (as for instance the width of the terminal where the * string is going to be printed). The splitting is performed without * breaking words, i.e. by replacing with a newline character ('\\n') * the last blank character before the maximum allowed width. Newline * characters already present in the input string are preserved. * Single words that exceed the max allowed width would not be split, * just in this case long lines are tolerated. A number of blanks to * be inserted at every split point must be specified, setting the * left indentation level for the printed string. This number must * not exceed the maximum allowed width. 
*/ static const char *strsplit(char * split, const char *s, int blanks, int width) { int i, j, k; int cuti = 0; int cutj = 0; int limit = width; if (width < 1) return s; if (blanks >= width) blanks = width - 1; /* Give up indentation */ for (i = 0, j = 0; i < CPL_MAX_MSG_LENGTH && j < CPL_MAX_MSG_LENGTH; i++, j++) { split[j] = s[i]; if (s[i] == ' ' || s[i] == '\0' || s[i] == '\n') { if (i >= limit) { /* * Go back to the previous cuttable position, if possible */ if (limit - cuti < width - blanks) { j = cutj; i = cuti; } else { if (s[i] == '\0') break; } /* * Split here, and insert blanks */ split[j] = '\n'; for (k = 0, j++; k < blanks && j < CPL_MAX_MSG_LENGTH; k++, j++) split[j] = ' '; j--; limit = width - blanks + i; } else { if (s[i] == '\0') break; if (s[i] == '\n') { /* * Split point already present in input string: just add * the specified number of blanks */ if (s[i+1] == '\0') { split[j] = '\0'; break; } for (k = 0, j++; k < blanks && j < CPL_MAX_MSG_LENGTH; k++, j++) split[j] = ' '; j--; limit = width - blanks + i; } /* * Keep track of the last cuttable position */ cutj = j; cuti = i; } } } /* * Safety belt! */ split[CPL_MAX_MSG_LENGTH - 1] = '\0'; return split; } /* * @brief * Format and output message string. * * @param severity Severity level of the incoming message. * @param component Name of the component/function generating the message. * @param caller 1 = cpl_msg_info_overwritable, 0 = all the others. * @param format Format string in the usual C convention. * @param al Variable argument list associated to the @em format. * * @return Nothing. * * This private function is used to actually display/add the message * to terminal and/or log file. Messages with severity level equal to * "error" or greater would be sent to stderr, the other messages * would go to stdout. * * If the severity level is lower than the levels set by * @b cpl_msg_set_level() and @b cpl_msg_set_log_level(), then * the message is not displayed. 
* * @see cpl_msg_set_level(), cpl_msg_set_log_level() */ inline static void cpl_msg_out(cpl_msg_severity severity, const char *component, int caller, const char *format, va_list al) { struct tm time_of_day; time_t seconds; char msg_text[CPL_MAX_MSG_LENGTH] = ""; char msg_log[CPL_MAX_MSG_LENGTH] = ""; char msg_term[CPL_MAX_MSG_LENGTH] = ""; char split[CPL_MAX_MSG_LENGTH]; #ifdef _OPENMP char *tid; #endif int start_log_line, start_term_line; int copy_only; int i; if (severity < term_min_level && severity < log_min_level) return; if (severity == CPL_MSG_OFF) return; seconds = time(NULL); cx_vsnprintf(msg_text, CPL_MAX_MSG_LENGTH, format, al); /* * Date and time. Note that time tag and severity field are not * affected by indentation. Date and time are always present in * the log file, optional in the terminal output. */ if (localtime_r(&seconds, &time_of_day) == NULL) { msg_log[0] = '\0'; msg_term[0] = '\0'; } else { /* three 2-digit integers + 2 colons + 1 space + terminating 0 */ char msg_timestamp[10]; strftime(msg_timestamp, 10, "%H:%M:%S ", &time_of_day); strncpy(msg_log, msg_timestamp, 10); if (time_tag) { strncpy(msg_term, msg_timestamp, 10); } else { msg_term[0] = '\0'; } } /* * Severity label */ if (severity == CPL_MSG_ERROR) { strncat(msg_log, ERROR_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_term, ERROR_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } else if (severity == CPL_MSG_WARNING) { strncat(msg_log, WARNING_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_term, WARNING_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } else if (severity == CPL_MSG_INFO) { strncat(msg_log, INFO_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_term, INFO_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } else if (severity == CPL_MSG_DEBUG) { strncat(msg_log, DEBUG_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_term, DEBUG_STRING, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } /* * Domain, 
component name, and message appended: */ if (domain_tag) { strncat(msg_term, domain, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); strncat(msg_term, ": ", CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } if (component_tag || term_min_level == CPL_MSG_DEBUG) { strncat(msg_term, component, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); strncat(msg_term, ": ", CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } strncat(msg_log, component, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_log, ": ", CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); #ifdef _OPENMP /* * Thread ID */ tid = cpl_sprintf("[tid=%03d] ", omp_get_thread_num()); strncat(msg_log, tid, CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); if (threadid_tag) strncat(msg_term, tid, CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); cpl_free(tid); #endif /* * Message indentation */ for (i = 0; i < indent_value; i++) { strncat(msg_log, " ", CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1); strncat(msg_term, " ", CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1); } start_log_line = strlen(msg_log); start_term_line = strlen(msg_term); /* * Finally add the message text. If message is too long * it is truncated. 
*/ copy_only = CPL_MAX_MSG_LENGTH - strlen(msg_log) - 1; strncat(msg_log, msg_text, copy_only); copy_only = CPL_MAX_MSG_LENGTH - strlen(msg_term) - 1; strncat(msg_term, msg_text, copy_only); if (severity >= log_min_level) fprintf(logfile, "%s\n", strsplit(split, msg_log, start_log_line, log_width)); if (severity >= term_min_level) { if (severity > CPL_MSG_WARNING) { if (overwrite) { cx_printerr("\n%s\n", strsplit(split, msg_term, start_term_line, page_width)); overwrite = 0; } else cx_printerr("%s\n", strsplit(split, msg_term, start_term_line, page_width)); } else if (caller) { char *c = strrchr(msg_term, '\n'); if (c >= msg_term) *c = '\0'; cx_print("\r%s", msg_term); } else if (overwrite) { cx_print("\n%s\n", strsplit(split, msg_term, start_term_line, page_width)); overwrite = 0; } else cx_print("%s\n", strsplit(split, msg_term, start_term_line, page_width)); } } /** * @brief * Initialise the messaging system * * @return @c CPL_ERROR_NONE on success. * * @error * <table class="ec" align="center"> * <tr> * <td class="ecl">CPL_ERROR_FILE_ALREADY_OPEN</td> * <td class="ecr"> * The messaging system was already initialised. * </td> * </tr> * <tr> * <td class="ecl">CPL_ERROR_DUPLICATING_STREAM</td> * <td class="ecr"> * <tt>stdout</tt> and <tt>stderr</tt> streams cannot be duplicated. * </td> * </tr> * <tr> * <td class="ecl">CPL_ERROR_ASSIGNING_STREAM</td> * <td class="ecr"> * A stream cannot be associated with a file descriptor. * </td> * </tr> * </table> * @enderror * * This function needs to be called to activate the messaging system, * typically at the beginning of a program. An attempt to use any * messaging function before turning the system on would generate * a warning message. The messaging system needs to be deactivated * by calling the function @c cpl_msg_stop(). 
However, since these * functions are called anyway by the functions @c cpl_init() and * @c cpl_end(), there is generally no need to call them explicitly, * and starting from CPL 2.1 they are deprecated. * * When @c cpl_msg_init() is called, the @em stdout and * @em stderr streams are duplicated for greater flexibility of * the system. The terminal width is determined (if possible), * and the resized window signal handler is deployed to monitor * possible changes of the terminal window width. If the width of * the output device cannot be determined, lines of text are not * splitted when written to output. If line splitting is not wanted, * the function @c cpl_msg_set_width() should be called specifying * a non positive width. */ cpl_error_code cpl_msg_init(void) { #ifdef CPL_HAVE_STREAM_DUPLICATION struct winsize win; static int err_stream; #endif if (msg_init > 0) return cpl_error_set_(CPL_ERROR_FILE_ALREADY_OPEN); #ifdef CPL_HAVE_STREAM_DUPLICATION /* * First duplicate stdout and stderr streams */ if ((out_stream = dup(fileno(stdout))) < 0) return cpl_error_set_(CPL_ERROR_DUPLICATING_STREAM); if (!(err_stream = dup(fileno(stderr)))) return cpl_error_set_(CPL_ERROR_DUPLICATING_STREAM); if (!(msg_stdout = fdopen(out_stream, "a"))) return cpl_error_set_(CPL_ERROR_ASSIGNING_STREAM); if (!(msg_stderr = fdopen(err_stream, "a"))) return cpl_error_set_(CPL_ERROR_ASSIGNING_STREAM); #else msg_stdout = stdout; msg_stderr = stderr; #endif default_printer = cx_print_set_handler(_cpl_print_out); default_error = cx_printerr_set_handler(_cpl_print_err); msg_init = 1; #ifdef CPL_HAVE_STREAM_DUPLICATION #ifdef CPL_HAVE_WINDOW_RESIZING /* * Get the terminal window size, and if successful deploy the handler * for any image resizing at runtime. 
*/ if (ioctl(out_stream, TIOCGWINSZ, &win) < 0 || win.ws_col < 1) return CPL_ERROR_NONE; page_width = win.ws_col; act.sa_handler = _cpl_change_width; sigemptyset(&act.sa_mask); act.sa_flags = 0; /* Probably more appropriate flags * * initialisation should be inserted here. */ act.sa_flags &= ~SA_SIGINFO; /* Eliminates SA_SIGINFO from any setting * * above. */ sigaction(SIGWINCH, &act, &oact); #endif #endif /* Setup time zone information for localtime_r() calls in cpl_msg_out() */ tzset(); return CPL_ERROR_NONE; } /** * @brief * Turn the messaging system off. * * @return Nothing * * This function needs to be called to turn the messaging system off, * typically at the end of a program. To turn the messaging system * on the function @c cpl_msg_init() needs to be called. However, since * these functions are called anyway by the functions @c cpl_init() * and @c cpl_end(), there is generally no need to call them explicitly, * and starting from CPL 2.1 they are deprecated. * * When @c cpl_msg_stop() is called, the default resized window signal * handler is restored, and the duplicated output streams are closed. * If a log file is still open, it is closed, and the log verbosity * level is set to CPL_MSG_OFF. If the messaging system is not on, * nothing is done, and no error is set. */ void cpl_msg_stop(void) { if (msg_init == 0) return; #ifdef CPL_HAVE_STREAM_DUPLICATION #ifdef CPL_HAVE_WINDOW_RESIZING if (act.sa_handler == _cpl_change_width) sigaction(SIGWINCH, &oact, NULL); #endif #endif cx_print_set_handler(default_printer); cx_printerr_set_handler(default_error); if (msg_stdout != stdout) fclose(msg_stdout); if (msg_stderr != stderr) fclose(msg_stderr); cpl_msg_stop_log(); msg_init = 0; } /** * @brief * Open and initialise a log file. * * @param verbosity Verbosity level. * * @return @c CPL_ERROR_NONE on success. * * @error * <table class="ec" align="center"> * <tr> * <td class="ecl">CPL_ERROR_FILE_ALREADY_OPEN</td> * <td class="ecr"> * A log file was already started. 
*     </td>
 *   </tr>
 *   <tr>
 *     <td class="ecl">CPL_ERROR_FILE_NOT_CREATED</td>
 *     <td class="ecr">
 *       Log file cannot be created.
 *     </td>
 *   </tr>
 * </table>
 * @enderror
 *
 * If the specified @em verbosity level is different from @c CPL_MSG_OFF,
 * a log file is created and initialised with a header containing the
 * start logging time, the @em domain identifier set by the function
 * @c cpl_msg_set_domain(), and the chosen @em verbosity level. The
 * @em verbosity specifies the lowest severity level that a message
 * should have to be written to the log file. The name of the created
 * log file may be previously set with the function @c cpl_msg_set_log_name(),
 * otherwise it is left to a default ".logfile". The log file name can
 * be obtained by calling the function @c cpl_msg_get_log_name().
 * Typically this function is called at the beginning of a program.
 * Calling it while a log file is already open has no effect, but it
 * will return an error code.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

cpl_error_code cpl_msg_set_log_level(cpl_msg_severity verbosity)
{

    _cpl_msg_init(cpl_func);

    if (logfile) {

       /*
        * If a log file was already open, nothing is done, but a status
        * is returned.
        */

        return cpl_error_set_(CPL_ERROR_FILE_ALREADY_OPEN);
    }

    if (verbosity != CPL_MSG_OFF) {
        char timeLabel[TIME_ISO8601_LENGTH];

        if ((logfile = fopen(logfile_name, "w")) == NULL)
            return cpl_error_set_message_(CPL_ERROR_FILE_NOT_CREATED, "%s",
                                          logfile_name);

        /* Line-buffer the log so each entry reaches disk promptly */
        (void)setvbuf(logfile, (char *) NULL, _IOLBF, 0);

        /* logfile and log_min_level are always set/cleared together:
           cpl_msg_out() relies on this invariant */
        log_min_level = verbosity;

        /*
         * Write log file header
         */

        _cpl_timestamp_iso8601(timeLabel);

        fprintf(logfile, "\n");
        fprintf(logfile, "Start time : %s\n", timeLabel);
        fprintf(logfile, "Program name : %s\n", domain);
        fprintf(logfile, "Severity level : ");

        /* NOTE(review): the *_STRING label macros are passed directly as
           format strings; safe only while they contain no '%' conversion
           specifiers - confirm against their definitions. */
        switch(verbosity) {
            case CPL_MSG_DEBUG : fprintf(logfile, DEBUG_STRING); break;
            case CPL_MSG_INFO : fprintf(logfile, INFO_STRING); break;
            case CPL_MSG_WARNING : fprintf(logfile, WARNING_STRING); break;
            case CPL_MSG_ERROR : fprintf(logfile, ERROR_STRING); break;
            default : break;
        }

        fprintf(logfile, "\n\n");
    }

    return CPL_ERROR_NONE;
}

/**
 * @brief
 *   Close the current log file.
 *
 * @return @c CPL_ERROR_NONE on success.
 *
 * The log file is closed. The name of the created log file is always the same,
 * and can be obtained by calling the function @c cpl_msg_get_log_name().
 * An attempt to close a non existing log file would not generate an error
 * condition. This routine may be called in case the logging should be
 * terminated before the end of a program. Otherwise, the function
 * @c cpl_msg_stop() would automatically close the log file when called
 * at the end of the program.
 */

cpl_error_code cpl_msg_stop_log(void)
{

    _cpl_msg_init(cpl_func);

    if (log_min_level != CPL_MSG_OFF) {
        log_min_level = CPL_MSG_OFF;
        /* fclose() result intentionally ignored (best-effort shutdown) */
        fclose(logfile);
        logfile = NULL;
    }

    return CPL_ERROR_NONE;

}

/**
 * @brief
 *   Get the log file name.
 *
 * @return Logfile name
 *
 * The name of the log file is returned.
 */

const char *cpl_msg_get_log_name(void)
{
    _cpl_msg_init(cpl_func);

    /* Pointer to the file-static buffer; caller must not free it */
    return logfile_name;
}

/**
 * @brief
 *   Set the log file name.
 *
 * @param name  Name of log file.
 *
 * @return @c CPL_ERROR_NONE on success.
 *
 * @error
 * <table class="ec" align="center">
 *   <tr>
 *     <td class="ecl">CPL_ERROR_NULL_INPUT</td>
 *     <td class="ecr">
 *       The specified <i>name</i> is a <tt>NULL</tt> pointer.
 *     </td>
 *   </tr>
 *   <tr>
 *     <td class="ecl">CPL_ERROR_FILE_ALREADY_OPEN</td>
 *     <td class="ecr">
 *       A log file was already started.
 *     </td>
 *   </tr>
 *   <tr>
 *     <td class="ecl">CPL_ERROR_ILLEGAL_INPUT</td>
 *     <td class="ecr">
 *       The specified <i>name</i> is longer than
 *       <tt>CPL_MAX_LOGFILE_NAME</tt> characters (including the
 *       terminating '\\0').
 *     </td>
 *   </tr>
 * </table>
 * @enderror
 *
 * This function is used to set the log file name, and can only be
 * called before the log is opened by @c cpl_msg_set_log_level().
 * If this function is not called, or the specified @em name is
 * longer than <tt>CPL_MAX_LOGFILE_NAME</tt> characters, the log
 * file name is left to its default, ".logfile".
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

cpl_error_code cpl_msg_set_log_name(const char *name)
{

    _cpl_msg_init(cpl_func);

    if (name == NULL)
        return cpl_error_set_(CPL_ERROR_NULL_INPUT);

    if (logfile)
        return cpl_error_set_message_(CPL_ERROR_FILE_ALREADY_OPEN, "%s: %p",
                                      name, (const void*)logfile);

    /* Length was verified above, so the plain strcpy() cannot overflow */
    if (strlen(name) > CPL_MAX_LOGFILE_NAME - 1)
        return cpl_error_set_message_(CPL_ERROR_ILLEGAL_INPUT, "%s: %u + 1 > "
                                      CPL_STRINGIFY(CPL_MAX_LOGFILE_NAME) " = "
                                      CPL_XSTRINGIFY(CPL_MAX_LOGFILE_NAME),
                                      name, (unsigned)strlen(name));

    strcpy(logfile_name, name);

    return CPL_ERROR_NONE;

}

/**
 * @brief
 *   Set verbosity level of output to terminal.
 *
 * @param verbosity  Verbosity level.
 *
 * @return Nothing.
 *
 * The @em verbosity specifies the lowest severity level that a message
 * should have for being displayed to terminal. If this function is not
 * called, the verbosity level defaults to @c CPL_MSG_INFO.
 *
 * @note
 *   This function is not supposed to be called in threads.
*/

void cpl_msg_set_level(cpl_msg_severity verbosity)
{
    _cpl_msg_init(cpl_func);

    /* File-static threshold read by cpl_msg_out() */
    term_min_level = verbosity;
}

/*----------------------------------------------------------------------------*/
/**
  @brief    Set verbosity level of terminal output using an environment
            variable
  @return   void
  @see      cpl_msg_set_level
  @note This function can be used for run-time control of the verbosity
        level of unit test modules.

  The CPL verbosity level of output to terminal is set according to the
  environment variable CPL_MSG_LEVEL:

    debug:   CPL_MSG_DEBUG
    info:    CPL_MSG_INFO
    warning: CPL_MSG_WARNING
    error:   CPL_MSG_ERROR
    off:     CPL_MSG_OFF

  Any other value (including NULL) will cause the function to do nothing.

 */
/*----------------------------------------------------------------------------*/
void cpl_msg_set_level_from_env(void)
{
    const char * level = getenv("CPL_MSG_LEVEL");

    /* Unset variable: leave the current level untouched */
    if (level == NULL) return;

    if (!strcmp(level, "debug"))
        cpl_msg_set_level(CPL_MSG_DEBUG);
    else if (!strcmp(level, "info"))
        cpl_msg_set_level(CPL_MSG_INFO);
    else if (!strcmp(level, "warning"))
        cpl_msg_set_level(CPL_MSG_WARNING);
    else if (!strcmp(level, "error"))
        cpl_msg_set_level(CPL_MSG_ERROR);
    else if (!strcmp(level, "off"))
        cpl_msg_set_level(CPL_MSG_OFF);
    /* Any other value is silently ignored, as documented */

    return;
}

/**
 * @brief
 *   Get current log verbosity level.
 *
 * @return Current verbosity level.
 *
 * Get current verbosity level set for the output to the log file.
 */

cpl_msg_severity cpl_msg_get_log_level(void)
{
    _cpl_msg_init(cpl_func);
    return log_min_level;
}

/**
 * @brief
 *   Get current terminal verbosity level.
 *
 * @return Current verbosity level.
 *
 * Get current verbosity level set for the output to terminal.
 */

cpl_msg_severity cpl_msg_get_level(void)
{
    _cpl_msg_init(cpl_func);
    return term_min_level;
}

/**
 * @brief
 *   Attach a @em time tag to output messages.
 *
 * @return Nothing.
 *
 * As a default, @em time tags are attached just to messages written
 * to the log file. This function must be called to display the @em time
 * tag also in messages written to terminal. To turn the @em time tag
 * off the function @c cpl_msg_set_time_off() should be called.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_time_on(void)
{
    _cpl_msg_init(cpl_func);
    time_tag = 1;
}

/**
 * @brief
 *   Disable the @em time tag in output messages.
 *
 * @return Nothing.
 *
 * The @em time tag is turned off in messages written to terminal.
 * The @em time tag cannot be turned off in messages written to the
 * log file.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_time_off(void)
{
    _cpl_msg_init(cpl_func);
    time_tag = 0;
}

/**
 * @brief
 *   Attach a @em thread id tag to output messages.
 *
 * @return Nothing.
 *
 * As a default, @em thread ids tags are attached just to messages written
 * to the log file. This function must be called to display the @em thread id
 * tag also in messages written to terminal. To turn the @em thread id tag
 * off the function @c cpl_msg_set_threadid_off() should be called.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_threadid_on(void)
{
    _cpl_msg_init(cpl_func);
    threadid_tag = 1;
}

/**
 * @brief
 *   Disable the @em thread id tag to output messages
 *
 * @return Nothing.
 *
 * The @em thread id tag is turned off in messages written to terminal.
 * The @em thread id tag cannot be turned off in messages written to the
 * log file.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_threadid_off(void)
{
    _cpl_msg_init(cpl_func);
    threadid_tag = 0;
}

/**
 * @brief
 *   Attach the @em domain tag to output messages.
 *
 * @return Nothing.
 *
 * As a default, the @em domain tag is just written to the header of
 * the log file. This function must be called to attach the @em domain
 * tag to all messages written to terminal. If the @em domain tag is
 * on and no domain tag was specified, the string "Undefined domain"
 * (or something analogous) would be attached to all messages. To turn
 * the @em domain tag off the function @c cpl_msg_set_domain_off() must
 * be called.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_domain_on(void)
{
    _cpl_msg_init(cpl_func);
    domain_tag = 1;
}

/**
 * @brief
 *   Disable the @em domain tag in output messages.
 *
 * @return Nothing.
 *
 * The @em domain tag is turned off in messages written to terminal.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_domain_off(void)
{
    _cpl_msg_init(cpl_func);
    domain_tag = 0;
}

/**
 * @brief
 *   Attach the @em component tag to output messages.
 *
 * @return Nothing.
 *
 * As a default, the @em component tag is attached just to messages written
 * to the log file. This function must be called to display the @em component
 * tag also in messages written to terminal. To turn the @em component tag
 * off the function @c cpl_msg_set_component_off() should be called. However,
 * the @em component tag is always shown when the verbosity level is set
 * to @c CPL_MSG_DEBUG.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_component_on(void)
{
    _cpl_msg_init(cpl_func);
    component_tag = 1;
}

/**
 * @brief
 *   Disable the @em component tag in output messages.
 *
 * @return Nothing.
 *
 * The @em component tag is turned off in messages written to terminal,
 * unless the verbosity level is set to @c CPL_MSG_DEBUG. The @em component
 * tag cannot be turned off in messages written to the log file.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_component_off(void)
{
    _cpl_msg_init(cpl_func);
    component_tag = 0;
}

/**
 * @brief
 *   Set the @em domain name.
 *
 * @param name  Any task identifier, typically a pipeline recipe name.
 *
 * @return Nothing.
 *
 * This routine should be called at a pipeline recipe start, and
 * before a possible call to the function cpl_msg_set_log_level() or the
 * proper task identifier would not appear in the log file header.
 * The @em domain tag is attached to messages sent to terminal only
 * if the function @c cpl_msg_set_domain_on() is called. If the
 * @em domain tag is on and no domain tag was specified, the string
 * "Undefined domain" (or something analogous) would be attached
 * to all messages. To turn the @em domain tag off the function
 * @c cpl_msg_set_domain_off() should be called. If @em name is a
 * @c NULL pointer, nothing is done, and no error is set.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_domain(const char *name)
{

    _cpl_msg_init(cpl_func);

    if (name == NULL)
        return;

    if (strlen(name) >= CPL_MAX_DOMAIN_NAME) {
        /* Too long: copy a truncated prefix and terminate explicitly
           (strncpy() alone would leave the buffer unterminated here) */
        strncpy(domain, name, CPL_MAX_DOMAIN_NAME);
        domain[CPL_MAX_DOMAIN_NAME-1] = '\0';
    } else {
        strcpy(domain, name);
    }

}

/**
 * @brief
 *   Get the @em domain name.
 *
 * @return Pointer to "domain" string.
*
 * This routine returns always the same pointer to the statically
 * allocated buffer containing the "domain" string.
 */

const char *cpl_msg_get_domain(void)
{
    _cpl_msg_init(cpl_func);

    /* Pointer to the file-static buffer; caller must not free it */
    return domain;
}

/**
 * @brief
 *   Set the maximum width of the displayed text.
 *
 * @param width  Max width of the displayed text.
 *
 * @return Nothing.
 *
 * If a message is longer than @em width characters, it would be broken
 * into shorter lines before being displayed to terminal. However, words
 * longer than @em width would not be broken, and in this case longer
 * lines would be printed. This function is automatically called by the
 * messaging system every time the terminal window is resized, and the
 * width is set equal to the new width of the terminal window. If @em width
 * is zero or negative, long message lines would not be broken. Lines are
 * never broken in log files.
 */

void cpl_msg_set_width(int width)
{
    _cpl_msg_init(cpl_func);

    /* Negative widths are clamped to 0, which disables line splitting
       in strsplit() */
    if (width < 0)
        width = 0;

    page_width = width;
}

/**
 * @brief
 *   Set the indentation step.
 *
 * @param step  Indentation step.
 *
 * @return Nothing.
 *
 * To maintain a consistent message display style, this routine
 * should be called at most once, and just at program start.
 * A message line might be moved leftward or rightward by a
 * number of characters that is a multiple of the specified
 * indentation step. Setting the indentation step to zero or
 * to a negative number would eliminate messages indentation.
 * If this function is not called, the indentation step defaults to 2.
 *
 * @note
 *   This function is meant to configure once and for all the behaviour
 *   and the "style" of the messaging system, and it is not supposed to
 *   be called in threads.
 */

void cpl_msg_set_indentation(int step)
{
    _cpl_msg_init(cpl_func);

    if (step < 0)
        step = 0;

    indent_step = step;
}

/**
 * @brief
 *   Set the indentation level.
 *
 * @return Nothing.
 *
 * @param level  Indentation level.
 *
 * Any message printed after a call to this function would be indented
 * by a number of characters equal to the @em level multiplied by the
 * indentation step specified with the function @c cpl_msg_set_indentation().
 * Specifying a negative indentation level would set the indentation
 * level to zero.
 */

void cpl_msg_indent(int level)
{
    _cpl_msg_init(cpl_func);

    if (level < 0)
        level = 0;

    /* indent_value holds the absolute number of indentation spaces */
    indent_value = level * indent_step;
}

/**
 * @brief
 *   Increase the message indentation by one indentation step.
 *
 * @return Nothing.
 *
 * Calling this function is equivalent to increase the indentation
 * level by 1. See function @c cpl_msg_indent().
 */

void cpl_msg_indent_more(void)
{
    _cpl_msg_init(cpl_func);
    indent_value += indent_step;
}

/**
 * @brief
 *   Decrease the message indentation by one indentation step.
 *
 * @return Nothing.
 *
 * Calling this function is equivalent to decrease the indentation level
 * by 1. If the indentation level is already 0, it is not decreased.
 */

void cpl_msg_indent_less(void)
{
    _cpl_msg_init(cpl_func);

    /* Never go negative: a negative count would corrupt the
       indentation loop in cpl_msg_out() */
    if (indent_value > 0)
        indent_value -= indent_step;
}

/**
 * @brief
 *   Display an error message.
 *
 * @return Nothing.
 *
 * @param component  Name of the component generating the message.
 * @param format     Format string.
 * @param ...        Variable argument list associated to the format string.
 *
 * The @em format string should follow the usual @c printf() convention.
 * Newline characters shouldn't generally be used, as the message
 * would be split automatically according to the width specified with
 * @b cpl_msg_set_width(). Inserting a newline character would
 * enforce breaking a line of text even before the current row is
 * filled. Newline characters at the end of the @em format string
 * are not required. If @em component is a @c NULL pointer, it would
 * be set to the string "<empty field>". If @em format is a @c NULL
 * pointer, the message "<empty message>" would be printed.
 */

void cpl_msg_error(const char *component, const char *format, ...)
{
    /* Fall back to the file-static default component label on NULL */
    const char *c = component != NULL ? component : default_component;

    _cpl_msg_init(cpl_func);

    if (format == NULL) {
        /* Recurse once with the file-static default format string */
        cpl_msg_error(c, default_format);
    }
    else {
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_ERROR, c, 0, format, al);
        va_end(al);
    }

}

/**
 * @brief
 *   Display a warning message.
 *
 * @return Nothing.
 *
 * @param component  Name of the function generating the message.
 * @param format     Format string.
 * @param ...        Variable argument list associated to the format string.
 *
 * See the description of the function @c cpl_msg_error().
 */

void cpl_msg_warning(const char *component, const char *format, ...)
{
    const char *c = component != NULL ? component : default_component;

    _cpl_msg_init(cpl_func);

    if (format == NULL) {
        cpl_msg_warning(c, default_format);
    }
    else {
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_WARNING, c, 0, format, al);
        va_end(al);
    }

}

/**
 * @brief
 *   Display an information message.
 *
 * @return Nothing.
 *
 * @param component  Name of the function generating the message.
 * @param format     Format string.
 * @param ...        Variable argument list associated to the format string.
 *
 * See the description of the function @c cpl_msg_error().
 */

void cpl_msg_info(const char *component, const char *format, ...)
{
    const char *c = component != NULL ? component : default_component;

    _cpl_msg_init(cpl_func);

    if (format == NULL) {
        cpl_msg_info(c, default_format);
    }
    else {
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_INFO, c, 0, format, al);
        va_end(al);
    }

}

/**
 * @brief
 *   Display an overwritable information message.
 *
 * @return Nothing.
 *
 * @param component  Name of the function generating the message.
 * @param format     Format string.
 * @param ...        Variable argument list associated to the format string.
 *
 * See the description of the function @c cpl_msg_error(). The severity
 * of the message issued by @c cpl_msg_info_overwritable() is the same
 * as the severity of a message issued using @c cpl_msg_info(). The only
 * difference with the @c cpl_msg_info() function is that the printed
 * message would be overwritten by a new message issued using again
 * cpl_msg_info_overwritable(), if no other meassages were issued with
 * other messaging functions in between. This function would be used
 * typically in loops, as in the following example:
 *
 * @code
 * iter = 1000;
 * for (i = 0; i < iter; i++) {
 *     cpl_msg_info_overwritable(cpl_func,
 *                  "Median computation... %d out of %d", i, iter);
 *     <median computation would take place here>
 * }
 * @endcode
 *
 * @note
 *   In the current implementation, an overwritable message is obtained
 *   by not adding the new-line character ('\\n') at the end of the message
 *   (contrary to what @c cpl_msg_info() does).
 */

void cpl_msg_info_overwritable(const char *component, const char *format, ...)
{
    const char *c = component != NULL ? component : default_component;

    _cpl_msg_init(cpl_func);

    /* Mark the pending terminal line as replaceable by the next message */
    overwrite = 1;

    if (format == NULL) {
        /* NOTE(review): the NULL-format fallback goes through the plain
           (non-overwritable, caller=0) cpl_msg_info() path, unlike the
           caller=1 call below - presumably intentional; confirm. */
        cpl_msg_info(c, default_format);
    }
    else {
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_INFO, c, 1, format, al);
        va_end(al);
    }

}

/**
 * @brief
 *   Display a debug message.
 *
 * @return Nothing.
 *
 * @param component  Name of the function generating the message.
 * @param format     Format string.
 * @param ...        Variable argument list associated to the format string.
 *
 * See the description of the function @c cpl_msg_error().
 */

void cpl_msg_debug(const char *component, const char *format, ...)
{
    const char *c = component != NULL ? component : default_component;

    _cpl_msg_init(cpl_func);

    if (format == NULL) {
        cpl_msg_debug(c, default_format);
    }
    else {
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_DEBUG, c, 0, format, al);
        va_end(al);
    }

}

/**
 * @brief
 *   Display a progress message predicting the time required in a loop.
 *
 * @return Nothing.
 *
 * @param component  Name of the function generating the message.
 * @param i          Iteration, starting with 0 and less than iter.
 * @param iter       Total number of iterations.
 * @param format     Format string.
* @param ...        Variable argument list associated to the format string.
 * @see cpl_msg_info()
 * @deprecated Use standard calls such as cpl_msg_info() instead.
 *
 */

void cpl_msg_progress(const char *component, int i, int iter,
                      const char *format, ...)
{
    const char *c = component != NULL ? component : default_component;
    const double tquiet = 10.0; /* Accept silence for this many seconds */

    /* NOTE(review): the static locals below make this function hold
       per-loop state across calls - it supports one loop at a time and
       is not thread-safe (consistent with its deprecation). */
    static double tstart = 0.0; /* Initialize to avoid false warnings */
    static double tend = 0.0;   /* Initialize to avoid false warnings */
    double tspent;
    static int iprev = 0; /* Used to detect some illegal calls */
    static int nprev = 0; /* Used to detect some illegal calls */
    static int didmsg = 0;
    int percent;


    _cpl_msg_init(cpl_func);

    if (format == NULL) format = default_format;

    if (i >= iter || i < 0 || iter < 1) return;

    if (i == 0) {
        if (iter == 1) return; /* A meaningful message is not possible */

        /* Reset check variables */
        nprev = iter;
        iprev = 0;

        /* Assume caller printed a message before the loop started */
        /* Find out how much CPU was spent at this point */
        tstart = cpl_test_get_cputime();
        tend = 0;
        didmsg = 0;

        return;
    }

    /* More input errors: i must increase during loop */
    if (i <= iprev) return; /* cpl_ensure() ? */

    iprev = i;

    /* More input errors: iter may not change during loop */
    if (iter != nprev) return; /* cpl_ensure() ? */

    /* Compute the time spent in the loop so far */
    tspent = cpl_test_get_cputime() - tstart;

    /* This fraction (rounded down) of iterations has been completed */
    percent = (i * 100) / iter;

    if (i == iter-1 && didmsg)
        cpl_msg_debug(c, "Loop time prediction offset (%d%% done) [s]: "
                      "%.2g", percent, tend - tspent);

    /* Return if the time spent is within the allowed time of silence
       + the predicted time if any */
    if (tspent < tquiet + tend) return;

    /* A prediction has not (yet) been made, or the prediction was too
       optimistic. Make a (new) prediction on the assumption that the
       average time so far required per iteration is unchanged for the
       remaining iteration(s). */
    tend = tspent * (iter - i) / (double) i;

    /* Update the starting point for the prediction */
    tstart += tspent;

    if (tend >= 0.5) {
        /* Do not predict less than 1 second */
        const int itend = 0.5 + tend; /* Roundoff to integer */
        /* %% is expanded twice */
        char * extformat = cpl_sprintf("%s. %d%%%% done, about %d seconds left",
                                       format, percent, itend);
        va_list al;

        va_start(al, format);
        cpl_msg_out(CPL_MSG_INFO, c, 0, extformat, al);
        va_end(al);

        didmsg++;

        cpl_free(extformat);
    }
}
/**@}*/
conv.h
#ifndef CONV_H #define CONV_H namespace TSnap { /// Sequentially converts the table into a graph with links from nodes in \c SrcCol to those in \c DstCol. template<class PGraph> PGraph ToGraph(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); // make single pass over all rows in the table if (NodeType == atInt) { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; TInt DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } else if (NodeType == atFlt) { // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); Graph->AddEdge(SVal, DVal); } } else { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value TInt DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } Graph->SortNodeAdjV(); return Graph; } /// Converts the Table into a graph with edges from \c SrcCol to \c DstCol, and attribute vector defined by the arguments. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. 
the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. THash<TFlt, TInt> FltNodeVals; // node attributes THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), 
ColName); break; } } // get src and dst node attributes into hashmaps if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SVal, Table->SrcNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(DVal, Table->DstNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } // aggregate node attributes and add to graph if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (TNEANet::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } return Graph; } /// Calls ToNetwork with an empty attribute vector. Convenience wrapper. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetwork<PGraph>(Table, SrcCol, DstCol, V, AggrPolicy); } #ifdef GCC_ATOMIC /// Performs table to graph conversion in parallel using the sort-first algorithm. This is the recommended method to use. 
/// Sort-first parallel table->graph conversion.
/// Pipeline: (1) copy src/dst columns into four flat vectors, (2) sort one
/// pair by source and one by destination, (3) find per-thread partition
/// offsets that never split runs of equal node ids, (4) count and index the
/// distinct nodes per partition, (5) merge the sorted source/destination node
/// lists, (6) build each node with its deduplicated in/out neighbor vectors.
template<class PGraphMP>
PGraphMP ToGraphMP(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  // double start = omp_get_wtime();
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt NumRows = Table->NumValidRows;

  // Two copies of each column: (SrcCol1,DstCol1) will be sorted by source,
  // (SrcCol2,DstCol2) by destination.
  TIntV SrcCol1, DstCol1, SrcCol2, DstCol2;

  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { SrcCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
  }
  // double endResize = omp_get_wtime();
  // printf("Resize time = %f\n", endResize-start);

  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);

  // Phase 1: copy column values, indexed by physical row id.
  // NOTE(review): rows are written at RowId positions, so invalid rows leave
  // gaps that presumably carry default values - confirm against
  // GetPartitionRanges() semantics.
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        SrcCol2[RowId] = RowI.GetIntAttr(SrcColIdx);
        DstCol1[RowId] = RowI.GetIntAttr(DstColIdx);
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        SrcCol2[RowId] = RowI.GetStrMapById(SrcColIdx);
        DstCol1[RowId] = RowI.GetStrMapById(DstColIdx);
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        RowI++;
      }
    }
  }

  // Phase 2: sort the two copies concurrently as two untied OpenMP tasks.
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, DstCol1)
      { TTable::QSortKeyVal(SrcCol1, DstCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, SrcCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  // TTable::PSRSKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
  // TTable::PSRSKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
  // TInt IsS = TTable::CheckSortedKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
  // TInt IsD = TTable::CheckSortedKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
  // printf("IsSorted = %d %d\n", IsS.Val, IsD.Val);
  // double endSort = omp_get_wtime();
  // printf("Sort time = %f\n", endSort-endCopy);
  //return TNGraphMP::New(10, 100);

  // Phase 3: compute partition offsets that do not split equal-key runs.
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);

  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);

  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);

  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;

  // for (TInt i = 0; i < SrcOffsets.Len(); i++) {
  //   printf("%d ", SrcOffsets[i].Val);
  // }
  // printf("\n");
  // for (TInt i = 0; i < DstOffsets.Len(); i++) {
  //   printf("%d ", DstOffsets[i].Val);
  // }
  // printf("\n");

  // Phase 4a: count distinct node ids in every src/dst partition.
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);

  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }

  // for (TInt i = 0; i < SrcNodeCounts.Len(); i++) {
  //   printf("%d ", SrcNodeCounts[i].Val);
  // }
  // printf("\n");
  // for (TInt i = 0; i < DstNodeCounts.Len(); i++) {
  //   printf("%d ", DstNodeCounts[i].Val);
  // }
  // printf("\n");

  // Prefix sums: where each partition's node ids start in the global lists.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }

  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }

  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);

  // Phase 4b: record (node id, first row offset) for every distinct node.
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }

  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // double endNode = omp_get_wtime();
  // printf("Node time = %f\n", endNode-endSort);

  // Phase 5: merge the two sorted id lists into (node, src index, dst index)
  // triples; -1 marks "not present on that side".
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  // double endNodeResize = omp_get_wtime();
  // printf("(NodeResize time = %f)\n", endNodeResize-endNode);

  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  // double endMerge = omp_get_wtime();
  // printf("Merge time = %f\n", endMerge-endNode);

  TInt NumNodes = Nodes.Len();
  // printf("NumNodes = %d\n", NumNodes.Val);

  // Phase 6: allocate neighbor vectors, fill them (deduplicated), add nodes.
  PGraphMP Graph = TNGraphMP::New(NumNodes, NumRows);
  // NOTE(review): the allocation pass below is deliberately forced to a
  // single thread (NumThreads = 1) - presumably because concurrent Reserve()
  // was not safe/fast here; confirm before parallelizing.
  NumThreads = 1;
  int Delta = (NumNodes+NumThreads-1)/NumThreads;

  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);

  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,Delta)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      OutVV[m].Reserve(Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      InVV[m].Reserve(Sz);
    }
    //double endTr = omp_get_wtime();
    //printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
  }
  // double endAlloc = omp_get_wtime();
  // printf("Alloc time = %f\n", endAlloc-endMerge);

  NumThreads = omp_get_max_threads();
  Delta = (NumNodes+NumThreads-1)/(10*NumThreads);
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(dynamic)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      OutVV[m].CopyUniqueFrom(DstCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      InVV[m].CopyUniqueFrom(SrcCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
    //double endTr = omp_get_wtime();
    //printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
  }
  Graph->SetNodes(NumNodes);
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);

  return Graph;
}

/// Performs table to graph conversion in parallel. Uses the hash-first method, which is less optimal, use ToGraphMP instead.
/// Hash-first parallel table->graph conversion. Estimates the node count via
/// a bucket-occupancy heuristic, then repeatedly (doubling the estimate on
/// overflow) hashes rows into a pre-sized TNGraphMP, before assigning and
/// sorting the per-node edge lists.
template<class PGraphMP>
PGraphMP ToGraphMP3(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  PNGraphMP Graph;
  int MaxThreads = omp_get_max_threads();
  int Length, Threads, Delta, Nodes, Last;
  uint64_t NumNodesEst;
  TInt SrcColIdx, DstColIdx;
  TIntV InVec, OutVec;

  SrcColIdx = Table->GetColIdx(SrcCol);
  DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));

  /* Estimate number of nodes in the graph */
  int NumRows = Table->Next.Len();
  double Load = 10;
  int sz = NumRows / Load;
  /* NOTE(review): malloc result is not checked; sz can be 0 for tiny
     tables - confirm minimum table size expected by callers. */
  int *buckets = (int *)malloc(sz * sizeof(int));

  #pragma omp parallel for
  for (int i = 0; i < sz; i++) buckets[i] = 0;

  /* Mark which buckets are hit by destination-column values. */
  if (NodeType == atInt) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = Table->IntCols[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  else if (NodeType == atStr ) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = (Table->StrColMaps)[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  int cnt = 0;
  #pragma omp parallel for reduction(+:cnt)
  for (int i = 0; i < sz; i++) {
    if (buckets[i] == 0) cnt += 1;
  }
  /* Occupancy-based estimate (coupon-collector style): n ~ sz*ln(sz/empty).
     NOTE(review): if every bucket is hit (cnt == 0) this divides by zero -
     confirm upstream guarantees before relying on it. */
  NumNodesEst = sz * log ((double)sz / cnt);
  free (buckets);

  /* Until we correctly estimate the number of nodes */
  while (1)
  {
    Graph = TNGraphMP::New(NumNodesEst, 100);
    Length = Graph->Reserved();
    Threads = MaxThreads/2;
    Delta = (Length + Threads - 1) / Threads;

    /* Per-bucket in/out degree counters. */
    OutVec.Gen(Length);
    InVec.Gen(Length);

    /* build the node hash table, count the size of edge lists */
    Last = NumRows;
    Nodes = 0;
    omp_set_num_threads(Threads);
    #pragma omp parallel for schedule(static, Delta)
    for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
      /* NOTE(review): Nodes is incremented under a critical section but read
         here without synchronization - the "hash table full" bail-out is a
         best-effort check, not an exact one. */
      if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
        /* need bigger hash table */
        continue;
      }

      TInt SVal, DVal;
      if (NodeType == atInt) {
        SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
        DVal = Table->IntCols[DstColIdx][CurrRowIdx];
      }
      else if (NodeType == atStr ) {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      }
      int SrcIdx = abs((SVal.GetPrimHashCd()) % Length);
      if (!Graph->AddOutEdge1(SrcIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&OutVec[SrcIdx].Val, 1);

      int DstIdx = abs((DVal.GetPrimHashCd()) % Length);
      if (!Graph->AddInEdge1(DstIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&InVec[DstIdx].Val, 1);
    }
    if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
      /* We need to double our num nodes estimate */
      Graph.Clr();
      InVec.Clr();
      OutVec.Clr();
      NumNodesEst *= 2;
    }
    else {
      break;
    }
  }

  Graph->SetNodes(Nodes);

  uint Edges = 0;
  for (int i = 0; i < Length; i++) {
    Edges += OutVec[i] + InVec[i];
  }

  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->ReserveNodeDegs(Idx, InVec[Idx], OutVec[Idx]);
    }
  }

  /* assign edges */
  Length = Graph->Reserved();
  Threads = MaxThreads;
  Delta = (Length + Threads - 1) / Threads;

  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(static,Delta)
  for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
    TInt SVal, DVal;
    if (NodeType == atInt) {
      SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
      DVal = Table->IntCols[DstColIdx][CurrRowIdx];
    }
    else if (NodeType == atStr) {
      SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
    }

    Graph->AddOutEdge2(SVal, DVal);
    Graph->AddInEdge2(SVal, DVal);
  }

  /* sort edges */
  Length = Graph->Reserved();
  Threads = MaxThreads*2;
  Delta = (Length + Threads - 1) / Threads;

  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(dynamic)
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->SortEdges(Idx, InVec[Idx], OutVec[Idx]);
    }
  }

  return Graph;
}

/// Does Table to Network conversion in parallel using the sort-first algorithm. This is the recommended method to use.
template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } Sw->Stop(TStopwatch::AllocateColumnCopies); Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI 
< EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. // Nodes from same sources or destinations are ensured to be kept within same partition. 
TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt 
TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == 
DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); 
RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } // aggregate node attributes and add to graph if 
((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (typename PGraphMP::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } Graph->SetEdges(NumRows); Sw->Stop(TStopwatch::AddEdges); // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } /// Calls ToNetworkMP with empty attribute vector. Convenience wrapper. template<class PGraphMP> PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetworkMP<PGraphMP>(Table, SrcCol, DstCol, V,AggrPolicy); } ///Implements table to network conversion in parallel. Not the recommended algorithm, using ToNetworkMP instead. 
template<class PGraphMP>
inline PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol,
 TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  // Phase timings are recorded via the TStopwatch singleton.
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->NumValidRows;
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // (SrcCol1, EdgeCol1) and (DstCol2, EdgeCol2): working copies of the
  // src/dst columns, each paired with the row id (which doubles as edge id).
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
#pragma omp parallel sections num_threads(4)
  {
#pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
#pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  // int NThreads = omp_get_max_threads();
  const int NThreads = 40; // NOTE(review): hard-coded partition count, not tied to omp_get_max_threads().
  Table->GetPartitionRanges(Partitions, NThreads);
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  // Copy node ids (or their string-map ids) of every valid row into the
  // working columns, indexed by row id.
  if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // printf("NumRows = %d\n", NumRows.Val);
  // printf("NThreads = %d\n", NThreads);
  // for (int i = 0; i < Partitions.Len(); i++) {
  //   printf("Partition %d %d->%d\n", i, Partitions[i].GetVal1().Val, Partitions[i].GetVal2().Val);
  // }
  // Split the row range into NThreads equal chunks for the local sorts.
  int Parts[NThreads+1];
  for (int i = 0; i < NThreads; i++) { Parts[i] = NumRows.Val / NThreads * i; }
  Parts[NThreads] = NumRows;
  // for (int i = 0; i < NThreads+1; i++) {
  //   printf("Parts[%d] = %d\n", i, Parts[i]);
  // }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  // ExtremePoints rows 0/2: smallest/largest src key per sorted chunk;
  // rows 1/3: the same for dst. Used to bound the global id range.
  TInt ExtremePoints[4][NThreads];
  omp_set_num_threads(omp_get_max_threads());
#pragma omp parallel
  {
#pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(SrcCol1, EdgeCol1, StartPos, EndPos);
      ExtremePoints[0][i] = SrcCol1[StartPos];
      ExtremePoints[2][i] = SrcCol1[EndPos];
    }
#pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(DstCol2, EdgeCol2, StartPos, EndPos);
      ExtremePoints[1][i] = DstCol2[StartPos];
      ExtremePoints[3][i] = DstCol2[EndPos];
    }
  }
  // for (int i = 0; i < NThreads; i++) {
  //   printf("ExtremePoints[%d] = %d-%d -> %d-%d\n", i, ExtremePoints[0][i].Val, ExtremePoints[1][i].Val, ExtremePoints[2][i].Val, ExtremePoints[3][i].Val);
  // }
  // find min points
  TInt MinId(INT_MAX);
  for (int j = 0; j < 2; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MinId > ExtremePoints[j][i]) { MinId = ExtremePoints[j][i]; }
    }
  }
  TInt MaxId(-1);
  for (int j = 2; j < 4; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MaxId < ExtremePoints[j][i]) { MaxId = ExtremePoints[j][i]; }
    }
  }
  // printf("MinId = %d\n", MinId.Val);
  // printf("MaxId = %d\n", MaxId.Val);
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  // Bucket the sorted chunks into NumCollectors contiguous id ranges so each
  // collector can merge and re-sort its share independently.
  // const int NumCollectors = omp_get_max_threads();
  const int NumCollectors = 20; // NOTE(review): hard-coded collector count.
  int Range = MaxId.Val - MinId.Val;
  TIntV IdRanges(NumCollectors+1);
  for (int j = 0; j < NumCollectors; j++) { IdRanges[j] = MinId + Range/NumCollectors*j; }
  IdRanges[NumCollectors] = MaxId+1;
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("IdRanges[%d] = %d\n", i, IdRanges[i].Val);
  // }
  // SrcOffsets[i][k]: start of collector k's id range inside sorted chunk i.
  int SrcOffsets[NThreads][NumCollectors+1];
#pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (SrcCol1[j] >= IdRanges[CollectorId]) { SrcOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { SrcOffsets[i][CollectorId++] = Parts[i+1]; }
  }
  int DstOffsets[NThreads][NumCollectors+1];
#pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (DstCol2[j] >= IdRanges[CollectorId]) { DstOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { DstOffsets[i][CollectorId++] = Parts[i+1]; }
  }
  // for (int i = 0; i < NThreads; i++) {
  //   for (int j = 0; j < NumCollectors+1; j++) {
  //     printf("SrcOffsets[%d][%d] = %d\n", i, j, SrcOffsets[i][j]);
  //   }
  // }
  // for (int i = 0; i < NThreads; i++) {
  //   for (int j = 0; j < NumCollectors+1; j++) {
  //     printf("DstOffsets[%d][%d] = %d\n", i, j, DstOffsets[i][j]);
  //   }
  // }
  // Prefix-sum the per-chunk bucket sizes into global collector offsets.
  TIntV SrcCollectorOffsets(NumCollectors+1);
  SrcCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += SrcOffsets[i][k+1] - SrcOffsets[i][k]; }
    SrcCollectorOffsets[k+1] = SrcCollectorOffsets[k] + SumOffset;
  }
  TIntV DstCollectorOffsets(NumCollectors+1);
  DstCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += DstOffsets[i][k+1] - DstOffsets[i][k]; }
    DstCollectorOffsets[k+1] = DstCollectorOffsets[k] + SumOffset;
  }
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("SrcCollectorOffsets[%d] = %d\n", i, SrcCollectorOffsets[i].Val);
  // }
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("DstCollectorOffsets[%d] = %d\n", i, DstCollectorOffsets[i].Val);
  // }
  // Globally sorted copies: each collector gathers its buckets, sorts them,
  // and counts distinct node ids in its range.
  TIntV SrcCol3, EdgeCol3, EdgeCol4, DstCol4;
#pragma omp parallel sections num_threads(4)
  {
#pragma omp section
    { SrcCol3.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol3.Reserve(NumRows, NumRows); }
#pragma omp section
    { DstCol4.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol4.Reserve(NumRows, NumRows); }
  }
  TIntV SrcNodeCounts(NumCollectors), DstNodeCounts(NumCollectors);
#pragma omp parallel for schedule(static)
  for (int k = 0; k < NumCollectors; k++) {
    int ind = SrcCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = SrcOffsets[i][k]; j < SrcOffsets[i][k+1]; j++) {
        SrcCol3[ind] = SrcCol1[j];
        EdgeCol3[ind] = EdgeCol1[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(SrcCol3, EdgeCol3, SrcCollectorOffsets[k], SrcCollectorOffsets[k+1]-1);
    int SrcCount = 0;
    if (SrcCollectorOffsets[k+1] > SrcCollectorOffsets[k]) {
      SrcCount = 1;
      for (int j = SrcCollectorOffsets[k]+1; j < SrcCollectorOffsets[k+1]; j++) {
        if (SrcCol3[j] != SrcCol3[j-1]) { SrcCount++; }
      }
    }
    SrcNodeCounts[k] = SrcCount;
    ind = DstCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = DstOffsets[i][k]; j < DstOffsets[i][k+1]; j++) {
        DstCol4[ind] = DstCol2[j];
        EdgeCol4[ind] = EdgeCol2[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(DstCol4, EdgeCol4, DstCollectorOffsets[k], DstCollectorOffsets[k+1]-1);
    int DstCount = 0;
    if (DstCollectorOffsets[k+1] > DstCollectorOffsets[k]) {
      DstCount = 1;
      for (int j = DstCollectorOffsets[k]+1; j < DstCollectorOffsets[k+1]; j++) {
        if (DstCol4[j] != DstCol4[j-1]) { DstCount++; }
      }
    }
    DstNodeCounts[k] = DstCount;
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  // printf("Sorted = %d - %d\n", SrcCol3.IsSorted(), DstCol4.IsSorted());
  // for (int i = 0; i < NumRows-1; i++) {
  //   if (SrcCol3[i] > SrcCol3[i+1]) { printf("i=%d: %d %d\n", i, SrcCol3[i].Val, SrcCol3[i+1].Val); }
  // }
  // for (int i = 0; i < NumRows-1; i++) {
  //   if (DstCol4[i] > DstCol4[i+1]) { printf("i=%d: %d %d\n", i, DstCol4[i].Val, DstCol4[i+1].Val); }
  // }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
#pragma omp parallel sections
  {
#pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
#pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
#pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < 2*NumCollectors; t++) {
    if (t < NumCollectors) {
      TInt i = t;
      if (SrcCollectorOffsets[i] < SrcCollectorOffsets[i+1]) {
        TInt CurrNode = SrcCol3[SrcCollectorOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcCollectorOffsets[i]+1; j < SrcCollectorOffsets[i+1]; j++) {
          while (j < SrcCollectorOffsets[i+1] && SrcCol3[j] == CurrNode) { j++; }
          if (j < SrcCollectorOffsets[i+1]) {
            CurrNode = SrcCol3[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - NumCollectors;
      if (DstCollectorOffsets[i] < DstCollectorOffsets[i+1]) {
        TInt CurrNode = DstCol4[DstCollectorOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstCollectorOffsets[i]+1; j < DstCollectorOffsets[i+1]; j++) {
          while (j < DstCollectorOffsets[i+1] && DstCol4[j] == CurrNode) { j++; }
          if (j < DstCollectorOffsets[i+1]) {
            CurrNode = DstCol4[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of
  // each node; merge of the two sorted (node_id, offset) vectors.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
#pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    // double startTr = omp_get_wtime();
    // TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol3.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol3, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol4.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol4, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);
  return Graph;
}

/// Calls ToNetworkMP2 with an empty attribute vector. Convenience wrapper.
template<class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetworkMP2<PGraphMP>(Table, SrcCol, DstCol, V, V, V, AggrPolicy);
}
#endif // GCC_ATOMIC

/// Loads a mode, with name Name, into the PMMNet from the TTable. NCol specifies the node id column and NodeAttrV the node attributes.
int LoadModeNetToNet(PMMNet Graph, const TStr& Name, PTable Table, const TStr& NCol, TStrV& NodeAttrV);
/// Loads the nodes specified in column NCol from the TTable with the attributes specified in NodeAttrV.
int LoadMode(TModeNet& Graph, PTable Table, const TStr& NCol, TStrV& NodeAttrV);
/// Loads a crossnet from Mode1 to Mode2, with name CrossName, from the provided TTable. EdgeAttrV specifies edge attributes.
int LoadCrossNetToNet(PMMNet Graph, const TStr& Mode1, const TStr& Mode2, const TStr& CrossName, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Loads the edges from the TTable and EdgeAttrV specifies columns containing edge attributes. int LoadCrossNet(TCrossNet& Graph, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Converts table to a network sequentially. Use if network has only edge attributes. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } return Graph; } #ifdef GCC_ATOMIC /// Converts table to network in parallel. Use if network has only edge attributes. 
template<class PGraphMP>
inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol,
 TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  // Phase timings are recorded via the TStopwatch singleton.
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // (SrcCol1, EdgeCol1) and (DstCol2, EdgeCol2): working copies of the
  // src/dst columns, each paired with the row id (which doubles as edge id).
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  // NOTE(review): these three hashes are declared but never populated in this
  // overload — node attributes are not aggregated here.
  THash<TInt, TStrIntVH> NodeIntAttrs;
  THash<TInt, TStrFltVH> NodeFltAttrs;
  THash<TInt, TStrStrVH> NodeStrAttrs;
#pragma omp parallel sections num_threads(4)
  {
#pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
#pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
#pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  omp_set_num_threads(omp_get_max_threads());
  // Copy node ids (or their string-map ids) of every valid row into the
  // working columns, indexed by row id.
  if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  omp_set_num_threads(omp_get_max_threads());
  // Sort (src, edge) and (dst, edge) in two concurrent untied tasks.
#pragma omp parallel
  {
#pragma omp single nowait
    {
#ifndef GLib_WIN32
#pragma omp task untied shared(SrcCol1, EdgeCol1)
#endif
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
#pragma omp single nowait
    {
#ifndef GLib_WIN32
#pragma omp task untied shared(EdgeCol2, DstCol2)
#endif
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
#ifndef GLib_WIN32
#pragma omp taskwait
#endif
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // Find the offset of all partitions, each of which contains a list of rows.
  // Nodes from same sources or destinations are ensured to be kept within same partition.
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      // ensure that rows from the same sources are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      // ensure that rows to the same destinations are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
  TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
  // count the number of source nodes and destination nodes in each partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
#pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
#pragma omp parallel sections
  {
#pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
#pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
#pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of
  // each node; merge of the two sorted (node_id, offset) vectors.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
#pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    // double startTr = omp_get_wtime();
    // TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // make single pass over all rows in the table to add attributes
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);
  return Graph;
}
#endif // GCC_ATOMIC

/// Converts table to network sequentially. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables.
template<class PGraph>
PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol,
 TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol);
  const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol);
  THash<TInt, TStrIntVH> NodeIntAttrs;
  THash<TInt, TStrFltVH> NodeFltAttrs;
  THash<TInt, TStrStrVH> NodeStrAttrs;
  //Table->AddGraphAttributeV(SrcAttrV, false, true, false);
  //Table->AddGraphAttributeV(DstAttrV, false, false, true);
  //Table->AddGraphAttributeV(EdgeAttrV, true, false, true);
  // node values - i.e. the unique values of src/dst col
  //THashSet<TInt> IntNodeVals; // for both int and string node attr types.
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } //Add node attribtes if (NodeAttrV.Len() > 0) { for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) { if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; } TInt NId; if 
(NodeTypeN == atInt) { NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx]; } else if (NodeTypeN == atStr){ NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx]; } for (TInt i = 0; i < NodeAttrV.Len(); i++) { TStr ColName = NodeAttrV[i]; TAttrType T = NodeTable->GetColType(ColName); TInt Index = NodeTable->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName); break; } } } } return Graph; } #ifdef GCC_ATOMIC /// Converts table to network in parallel. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables. template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol); const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol); THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } 
Sw->Stop(TStopwatch::AllocateColumnCopies); Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. 
// Nodes from same sources or destinations are ensured to be kept within same partition. TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; 
i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < 
TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = 
RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } Graph->SetEdges(NumRows); Graph->SetMxEId(NumRows); Sw->Stop(TStopwatch::AddEdges); // make single pass over all rows in the table to add attributes for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } // Add node attribtes if (NodeAttrV.Len() > 0) { for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) { if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; } TInt NId; if (NodeTypeN == atInt) { NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx]; } else if (NodeTypeN == atStr){ NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx]; } for (TInt i = 0; i < NodeAttrV.Len(); i++) { TStr ColName = NodeAttrV[i]; TAttrType T = NodeTable->GetColType(ColName); TInt Index = NodeTable->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatN(NId, 
NodeTable->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName); break; } } } } // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } #endif // GCC_ATOMIC }; // TSnap namespace #endif // CONV_H
dotproduct.c
#include "Python.h"
#include "stdlib.h"
#include "omp.h"

/* Maximum element count of each flattened scratch matrix below. */
#define MAX_SIZE 500*500

typedef double BASE_TYPE;

/* Module-level scratch buffers holding cache-friendly copies of the two
 * operands (left operand row-major, right operand column-major).
 * NOTE(review): global state makes concurrent calls into
 * dot_product_optimized unsafe -- confirm the module is only driven from
 * one thread at a time. */
BASE_TYPE row_major[MAX_SIZE];
BASE_TYPE column_major[MAX_SIZE];

/* Free a 2D array produced by init_result_array()/convert().
 * Safe on NULL and on partially-allocated arrays (unset rows are NULL). */
static void free_2d_array(BASE_TYPE **array, int total_rows)
{
    if (array == NULL) {
        return;
    }
    for (int row = 0; row < total_rows; row++) {
        free(array[row]);
    }
    free(array);
}

/* Allocate a zero-initialised total_rows x total_columns matrix.
 * Returns NULL on allocation failure (anything partially allocated is
 * released first). Caller owns the result and frees it row-by-row. */
BASE_TYPE **init_result_array(int total_rows, int total_columns)
{
    BASE_TYPE **result_array = calloc(total_rows, sizeof *result_array);
    if (result_array == NULL) {
        return NULL;
    }
    for (int row = 0; row < total_rows; row++) {
        result_array[row] = calloc(total_columns, sizeof **result_array);
        if (result_array[row] == NULL) {
            /* rows not yet reached are NULL from calloc; free(NULL) is a no-op */
            free_2d_array(result_array, total_rows);
            return NULL;
        }
    }
    return result_array;
}

/* Unwrap a Python list-of-lists of ints into a freshly allocated C matrix.
 * Returns NULL on allocation failure. PyList_GetItem returns borrowed
 * references, so no refcounting is needed here. */
BASE_TYPE **convert(PyObject *ndimarray, int rows, int columns)
{
    BASE_TYPE **c_array = init_result_array(rows, columns);
    if (c_array == NULL) {
        return NULL;
    }
    for (int i = 0; i < rows; ++i) {
        PyObject *current_row = PyList_GetItem(ndimarray, i);
        for (int j = 0; j < columns; ++j) {
            c_array[i][j] = (BASE_TYPE) PyLong_AsLong(PyList_GetItem(current_row, j));
        }
    }
    return c_array;
}

/* Copy a 2D matrix into the flat row-major scratch buffer. */
void transform_row_major(BASE_TYPE **ndimarray, int rows, int columns)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < columns; j++) {
            row_major[i * columns + j] = ndimarray[i][j];
        }
    }
}

/* Copy a 2D matrix into the flat column-major scratch buffer, so the inner
 * product loop walks both operands sequentially in memory. */
void transform_column_major(BASE_TYPE **ndimarray, int rows, int columns)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < columns; j++) {
            column_major[j * rows + i] = ndimarray[i][j];
        }
    }
}

/* Build a new Python list-of-lists (of ints) from a C result matrix.
 * Returns a new reference. */
PyObject *build_python_array(BASE_TYPE **result_array, int rows, int columns)
{
    PyObject *pyResult = PyList_New(rows);
    for (int i = 0; i < rows; ++i) {
        PyObject *item = PyList_New(columns);
        for (int j = 0; j < columns; ++j) {
            /* inputs are Python ints, so the double product is integral */
            PyList_SetItem(item, j, PyLong_FromLong((long) result_array[i][j]));
        }
        PyList_SetItem(pyResult, i, item);
    }
    return pyResult;
}

/* dot_product_optimized(mat1, mat2) -> list[list[int]]
 * Matrix product of two list-of-lists matrices, parallelised with OpenMP
 * over the rows of the result. */
PyObject *dot_product_optimized(PyObject *self, PyObject *args)
{
    PyObject *mat1;
    PyObject *mat2;
    if (!PyArg_ParseTuple(args, "O|O", &mat1, &mat2)) {
        return NULL;
    }

    int mat1_rows    = PyObject_Length(mat1);
    int mat1_columns = PyObject_Length(PyList_GetItem(mat1, 0));
    int mat2_rows    = PyObject_Length(mat2);
    int mat2_columns = PyObject_Length(PyList_GetItem(mat2, 0));

    /* Shapes must be multiplicable and must fit the fixed scratch buffers;
     * previously an oversized input silently overflowed the globals. */
    if (mat1_columns != mat2_rows) {
        PyErr_SetString(PyExc_ValueError, "matrix dimensions do not match");
        return NULL;
    }
    if ((long) mat1_rows * mat1_columns > MAX_SIZE ||
        (long) mat2_rows * mat2_columns > MAX_SIZE) {
        PyErr_SetString(PyExc_ValueError, "matrix exceeds MAX_SIZE scratch buffers");
        return NULL;
    }

    BASE_TYPE **mat1_c = convert(mat1, mat1_rows, mat1_columns);
    BASE_TYPE **mat2_c = convert(mat2, mat2_rows, mat2_columns);
    BASE_TYPE **result = init_result_array(mat1_rows, mat2_columns);
    if (mat1_c == NULL || mat2_c == NULL || result == NULL) {
        free_2d_array(mat1_c, mat1_rows);
        free_2d_array(mat2_c, mat2_rows);
        free_2d_array(result, mat1_rows);
        return PyErr_NoMemory();
    }

    transform_row_major(mat1_c, mat1_rows, mat1_columns);
    transform_column_major(mat2_c, mat2_rows, mat2_columns);

    #pragma omp parallel num_threads(6)
    {
        #pragma omp for
        for (int i = 0; i < mat1_rows; i++) {
            int iOff = i * mat1_columns;
            for (int j = 0; j < mat2_columns; j++) {
                int jOff = j * mat2_rows;
                /* BUG FIX: the accumulator was declared 'int', truncating and
                 * potentially overflowing the BASE_TYPE (double) products
                 * before they reached the result matrix. */
                BASE_TYPE tot = 0;
                for (int k = 0; k < mat2_rows; k++) {
                    tot += row_major[iOff + k] * column_major[jOff + k];
                }
                result[i][j] = tot;
            }
        }
    }

    PyObject *py_result = build_python_array(result, mat1_rows, mat2_columns);

    /* BUG FIX: the C-side copies were never released (leak per call). */
    free_2d_array(mat1_c, mat1_rows);
    free_2d_array(mat2_c, mat2_rows);
    free_2d_array(result, mat1_rows);

    /* BUG FIX: returning Py_BuildValue("O", x) added a second reference to
     * the freshly created list and leaked it; return the new ref directly. */
    return py_result;
}

static PyMethodDef module_methods[] = {
    {"dot_product_optimized", (PyCFunction) dot_product_optimized, METH_VARARGS,
     "Optimized version of dot_product"},
    /* BUG FIX: the sentinel entry was missing; CPython walks this array
     * until it hits a NULL ml_name, so omitting it is undefined behavior
     * at module import. */
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef cModPyDem = {
    PyModuleDef_HEAD_INIT,
    "dotproduct",
    "",
    -1,
    module_methods
};

PyMODINIT_FUNC PyInit_dotproduct(void)
{
    return PyModule_Create(&cModPyDem);
}
pr63326.c
/* PR c/63326 */ /* { dg-do compile } */ /* { dg-options "-fopenmp" } */ void f1 (int x) { int i; if (x) #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; #pragma omp parallel { if (x) #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { if (x) #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { if (x) #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } if (x) #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; if (x) #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; } void f2 (int x) { int i; while (x) #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; #pragma omp parallel { while (x) #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { while (x) #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp for ordered(1) 
for (i = 0; i < 16; i++) { while (x) #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } while (x) #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; while (x) #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; } void f3 (int x) { int i; for (x = 0; x < 10; x++) #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; #pragma omp parallel { for (x = 0; x < 10; x++) #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { for (x = 0; x < 10; x++) #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { for (x = 0; x < 10; x++) #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } for (x = 0; x < 10; x++) #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; for (x = 0; x < 10; x++) #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound 
statements" } */ ; } void f4 (int x) { int i; { do #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp flush /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ #pragma omp parallel { do #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ #pragma omp parallel { do #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ #pragma omp for ordered(1) for (i = 0; i < 16; i++) { { do #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ } { do #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ { do #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ while (0); } /* { dg-error "before" "" { target c++ } } */ } void f5 (int x) { int i; switch (x) #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp 
flush /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; #pragma omp parallel { switch (x) #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { switch (x) #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { switch (x) #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; switch (x) #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; } void f6 (int x) { int i; switch (x) { case 1: #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { switch (x) { case 1: #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } } #pragma omp parallel { switch (x) { case 1: #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { switch (x) { case 1: #pragma omp ordered depend(source) /* { 
dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } } switch (x) { case 1: #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { case 1: #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; } } void f7 (int x) { int i; switch (x) { default: #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { switch (x) { default: #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } } #pragma omp parallel { switch (x) { default: #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { switch (x) { default: #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } } switch (x) { default: #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; } switch (x) { default: #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; } } void f8 (int x) 
{ int i; lab1: #pragma omp barrier /* { dg-error "may only be used in compound statements" } */ ; lab2: #pragma omp flush /* { dg-error "may only be used in compound statements" } */ ; lab3: #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */ ; lab4: #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */ ; #pragma omp parallel { lab5: #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp parallel { lab6: #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */ ; } #pragma omp for ordered(1) for (i = 0; i < 16; i++) { lab7: #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */ ; lab8: #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */ ; } lab9: #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */ ; lab10: #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */ ; lab11: #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */ ; }
m_mesh.h
// // Created by Harold on 2020/12/14. // #ifndef M_MATH_M_MESH_H #define M_MATH_M_MESH_H #include <opencv2/core.hpp> #include <open3d/Open3D.h> #include <omp.h> namespace M_MATH { class TriangleMesh { public: enum MeshType { BallPivot, Poisson }; public: static std::shared_ptr<open3d::geometry::TriangleMesh> GenMesh(std::vector<cv::Point3f> const& pts, MeshType type); static bool SaveMesh(std::string const& filename, std::shared_ptr<open3d::geometry::TriangleMesh> mesh, bool write_ascii = false, bool compressed = false, bool write_vertex_normals = true, bool write_vertex_colors = true, bool write_triangle_uvs = true, bool print_progress = false); static std::shared_ptr<open3d::geometry::TriangleMesh> LoadMesh(std::string const& filename, bool print_progress = false); //private: static std::shared_ptr<open3d::geometry::PointCloud> ToPointCloud(std::vector<cv::Point3f> const& pts); static std::shared_ptr<open3d::geometry::TriangleMesh> GenMesh(std::shared_ptr<open3d::geometry::PointCloud> const& pcd, MeshType type); }; std::shared_ptr<open3d::geometry::PointCloud> TriangleMesh::ToPointCloud(const std::vector<cv::Point3f> &pts) { auto pcd = std::make_shared<open3d::geometry::PointCloud>(); pcd->Clear(); pcd->points_.resize(pts.size()); #pragma omp parallel for for (auto i = 0; i < pts.size(); i++) { pcd->points_[i](0) = pts[i].x; pcd->points_[i](1) = pts[i].y; pcd->points_[i](2) = pts[i].z; } return pcd; } std::shared_ptr<open3d::geometry::TriangleMesh> TriangleMesh::GenMesh(const std::shared_ptr<open3d::geometry::PointCloud>& pcd, TriangleMesh::MeshType type) { if (pcd->IsEmpty()) return nullptr; // compute normal if (!pcd->HasNormals()) { //pcd->EstimateNormals(open3d::geometry::KDTreeSearchParamKNN(), false); pcd->EstimateNormals(); pcd->NormalizeNormals(); } // gen mesh std::shared_ptr<open3d::geometry::TriangleMesh> mesh; switch (type) { case BallPivot: { std::vector<double> distances = pcd->ComputeNearestNeighborDistance(); double avg_dist = 0.f; for (auto dis 
: distances) avg_dist += dis; avg_dist /= distances.size(); double radius = 1.5 * avg_dist; // adjust this coefficient std::vector<double> radii = {radius, radius * 2, radius * 4, radius * 8}; mesh = open3d::geometry::TriangleMesh::CreateFromPointCloudBallPivoting(*pcd, radii); break; } case Poisson: { auto tuple = open3d::geometry::TriangleMesh::CreateFromPointCloudPoisson(*pcd); auto p_mesh = std::get<0>(tuple); auto bbox = pcd->GetAxisAlignedBoundingBox(); mesh = p_mesh->Crop(bbox); break; } } return mesh; } std::shared_ptr<open3d::geometry::TriangleMesh> TriangleMesh::GenMesh(const std::vector<cv::Point3f> &pts, TriangleMesh::MeshType type) { auto pcd = ToPointCloud(pts); return GenMesh(pcd, type); } bool TriangleMesh::SaveMesh(const std::string &filename, std::shared_ptr<open3d::geometry::TriangleMesh> mesh, bool write_ascii, bool compressed, bool write_vertex_normals, bool write_vertex_colors, bool write_triangle_uvs, bool print_progress) { return open3d::io::WriteTriangleMeshToOBJ(filename, *mesh, write_ascii, compressed, write_vertex_normals, write_vertex_colors, write_triangle_uvs, print_progress); } std::shared_ptr<open3d::geometry::TriangleMesh> TriangleMesh::LoadMesh(const std::string &filename, bool print_progress) { return open3d::io::CreateMeshFromFile(filename, print_progress); } } #endif //M_MATH_M_MESH_H
hello.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> int main (int argc, char *argv[]) { int th_id, nthreads; #pragma omp parallel private(th_id) { th_id = omp_get_thread_num(); printf("Hello World from thread %d\n", th_id); #pragma omp barrier if (th_id == 0) { nthreads = omp_get_num_threads(); printf("There are %d threads\n",nthreads); } } return EXIT_SUCCESS; }
BitmapPrimitiveShape.h
#ifndef BITMAPPRIMITIVESHAPE_HEADER #define BITMAPPRIMITIVESHAPE_HEADER #include "BasePrimitiveShape.h" #include <GfxTL/AABox.h> #include <MiscLib/Vector.h> #include <algorithm> #include <istream> #include <MiscLib/Performance.h> #include <GfxTL/MathHelper.h> #include <GfxTL/IndexedIterator.h> #include "IndexIterator.h" #include <MiscLib/Pair.h> #ifdef DOPARALLEL #include <omp.h> #endif #ifndef DLL_LINKAGE #define DLL_LINKAGE #endif namespace schnabel { struct BitmapInfo { MiscLib::Vector< std::pair< float, float > > params; MiscLib::Vector< char > bitmap; GfxTL::AABox< GfxTL::Vector2Df > bbox; MiscLib::Vector< size_t > bmpIdx; size_t uextent, vextent; }; class DLL_LINKAGE BitmapPrimitiveShape : public BasePrimitiveShape { public: bool Init(bool binary, std::istream *i); size_t ConnectedComponent(const PointCloud &pc, float epsilon, MiscLib::Vector< size_t > *indices, bool doFiltering = true, float* borderRatio = 0 ); size_t AllConnectedComponents(const PointCloud &pc, float epsilon, BitmapInfo& bitmapInfo, MiscLib::Vector< size_t > *indices, MiscLib::Vector< int >& componentsImg, MiscLib::Vector< std::pair< int, size_t > >& labels, bool doFiltering = true ); void TrimmingPolygons(const PointCloud &pc, float epsilon, size_t begin, size_t end, std::deque< ComponentPolygons > *polys) const; void GenerateBitmapPoints(const PointCloud &pc, float epsilon, size_t begin, size_t end, PointCloud *bmpPc) const; public: virtual void Parameters(const Vec3f &p, std::pair< float, float > *param) const = 0; virtual bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const = 0; virtual void Parameters(GfxTL::IndexedIterator< MiscLib::Vector< size_t >::iterator, PointCloud::const_iterator > begin, GfxTL::IndexedIterator< MiscLib::Vector< size_t >::iterator, PointCloud::const_iterator > end, MiscLib::Vector< std::pair< float, float > > *bmpParams) const = 0; virtual void Parameters(GfxTL::IndexedIterator< IndexIterator, PointCloud::const_iterator > begin, GfxTL::IndexedIterator< 
IndexIterator, PointCloud::const_iterator > end, MiscLib::Vector< std::pair< float, float > > *bmpParams) const = 0; virtual void BitmapExtent(float epsilon, GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< std::pair< float, float > > *params, size_t *uextent, size_t *vextent) = 0; virtual void InBitmap(const std::pair< float, float > &param, float epsilon, const GfxTL::AABox< GfxTL::Vector2Df > &bbox, size_t uextent, size_t vextent, std::pair< int, int > *inBmp) const = 0; virtual void PreWrapBitmap(const GfxTL::AABox< GfxTL::Vector2Df > &bbox, float epsilon, size_t uextent, size_t vextent, MiscLib::Vector< char > *bmp) const; virtual void WrapBitmap(const GfxTL::AABox< GfxTL::Vector2Df > &bbox, float epsilon, bool *uwrap, bool *vwrap) const = 0; virtual void WrapComponents(const GfxTL::AABox< GfxTL::Vector2Df > &bbox, float epsilon, size_t uextent, size_t vextent, MiscLib::Vector< int > *componentImg, MiscLib::Vector< std::pair< int, size_t > > *labels) const; virtual bool InSpace(size_t u, size_t v, float epsilon, const GfxTL::AABox< GfxTL::Vector2Df > &bbox, size_t uextent, size_t vextent, Vec3f *p, Vec3f *n) const = 0; template< class IteratorT > void BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params, GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx) const; template< class IteratorT > void BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params, GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx, size_t border) const; void BuildPolygons(const PointCloud &pc, float epsilon, size_t begin, size_t end, GfxTL::AABox< GfxTL::Vector2Df > *bbox, size_t *uextent, size_t *vextent, std::deque< ComponentPolygons > *polys) 
const; protected: mutable GfxTL::AABox< GfxTL::Vector2Df > m_extBbox; }; template< class IteratorT > void BitmapPrimitiveShape::BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params, GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx) const { int size = end - begin; params->resize(size); // compute parameters and extent Parameters(GfxTL::IndexIterate(begin, pc.begin()), GfxTL::IndexIterate(end, pc.begin()), params); bbox->Min() = GfxTL::Vector2Df(std::numeric_limits< float >::infinity(), std::numeric_limits< float >::infinity()); bbox->Max() = -bbox->Min(); for(size_t i = 0; i < (size_t)size; ++i) { if((*params)[i].first < bbox->Min()[0]) bbox->Min()[0] = (*params)[i].first; if((*params)[i].first > bbox->Max()[0]) bbox->Max()[0] = (*params)[i].first; if((*params)[i].second < bbox->Min()[1]) bbox->Min()[1] = (*params)[i].second; if((*params)[i].second > bbox->Max()[1]) bbox->Max()[1] = (*params)[i].second; } // bbox gives the bounding box in parameter space // we can now set up the bitmap const_cast< BitmapPrimitiveShape * >(this)->BitmapExtent(*epsilon, bbox, params, uextent, vextent); if(*uextent < 2) *uextent = 2; if(*vextent < 2) *vextent = 2; bitmap->resize((*uextent) * (*vextent)); std::fill(bitmap->begin(), bitmap->end(), false); // set all true bits in bitmap bmpIdx->resize(params->size()); //#pragma omp parallel for schedule(static) for(int i = 0; i < size; ++i) { std::pair< int, int > bmpParam; InBitmap((*params)[i], *epsilon, *bbox, *uextent, *vextent, &bmpParam); // clamp bitmap coords bmpParam.first = GfxTL::Math< int >::Clamp(bmpParam.first, 0, *uextent - 1); bmpParam.second = GfxTL::Math< int >::Clamp(bmpParam.second, 0, *vextent - 1); (*bitmap)[(*bmpIdx)[i] = bmpParam.first + bmpParam.second * (*uextent)] = true; } } template< class IteratorT > void 
BitmapPrimitiveShape::BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params, GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx, size_t border) const { params->resize(end - begin); // compute parameters and extent Parameters(pc[*begin].pos, &(*params)[0]); bbox->Min() = bbox->Max() = GfxTL::Vector2Df((*params)[0].first, (*params)[0].second); size_t j = 1; IteratorT i = begin; for(++i; i != end; ++i, ++j) { Parameters(pc[*i].pos, &(*params)[j]); if(bbox->Min()[0] > (*params)[j].first) bbox->Min()[0] = (*params)[j].first; else if(bbox->Max()[0] < (*params)[j].first) bbox->Max()[0] = (*params)[j].first; if(bbox->Min()[1] > (*params)[j].second) bbox->Min()[1] = (*params)[j].second; else if(bbox->Max()[1] < (*params)[j].second) bbox->Max()[1] = (*params)[j].second; } // bbox gives the bounding box in parameter space // we can now set up the bitmap const_cast< BitmapPrimitiveShape * >(this)->BitmapExtent(*epsilon, bbox, params, uextent, vextent); if(*uextent < 2) *uextent = 2; if(*vextent < 2) *vextent = 2; bitmap->resize(((*uextent) + 2 * border) * ((*vextent) + 2 * border)); std::fill(bitmap->begin(), bitmap->end(), false); // set all true bits in bitmap bmpIdx->resize(params->size()); size_t lineWidth = (*uextent) + 2 * border; for(size_t i = 0; i < params->size(); ++i) { std::pair< int, int > bmpParam; InBitmap((*params)[i], *epsilon, *bbox, *uextent, *vextent, &bmpParam); // clamp bitmap coords bmpParam.first = GfxTL::Math< int >::Clamp(bmpParam.first, 0, *uextent - 1); bmpParam.second = GfxTL::Math< int >::Clamp(bmpParam.second, 0, *vextent - 1); (*bitmap)[(*bmpIdx)[i] = bmpParam.first + border + (bmpParam.second + border) * lineWidth] = true; } } } //...ns schnabel #endif
csrgemv_task.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once

#include <cstring>
#include <thread>
#include "bof_types.h"
#include "bof_utils.h"
#include "tasks/task.h"

namespace flash {
  // Task computing out = A * in for a horizontal block of rows of a CSR
  // matrix A, with the input vector already resident in memory.
  // The block starts at `start_row` and has at most `a_rblk_size` rows.
  // NOTE(review): `ja` and `a` are flash-resident (flash_ptr); the framework
  // presumably stages them into in_mem_ptrs via add_read() before execute()
  // runs — confirm against the BaseTask contract.
  class CsrGemvNoTransInMem : public BaseTask {
    // matrix specs
    MKL_INT*           ia;   // local row-offset array, rebased to start at 0 (owned; freed in execute())
    flash_ptr<MKL_INT> ja;   // column indices of this row block (on flash)
    flash_ptr<FPTYPE>  a;    // nonzero values of this row block (on flash)
    FBLAS_UINT         dim;      // padded dimension = max(a_nrows, a_cols), as required by mkl_csrgemv
    FBLAS_UINT         a_nrows;  // actual number of rows in this block
    FBLAS_UINT         nnzs;     // number of nonzeros in this block

    // vector specs
    FPTYPE* in;   // full input vector (length a_cols)
    FPTYPE* out;  // output slice for this block (v_out + start_row)

   public:
    CsrGemvNoTransInMem(FBLAS_UINT start_row, FBLAS_UINT a_rows,
                        FBLAS_UINT a_cols, FBLAS_UINT a_rblk_size, MKL_INT* ia,
                        flash_ptr<MKL_INT> ja, flash_ptr<FPTYPE> a,
                        FPTYPE* v_in, FPTYPE* v_out) {
      // matrix specs
      this->a_nrows = std::min(a_rows - start_row, a_rblk_size);
      this->dim = std::max(a_nrows, a_cols);
      // advance flash pointers to the first nonzero of this row block
      this->ja = ja + ia[start_row];
      this->a = a + ia[start_row];

      // copy over offsets and remove start offset
      this->ia = new MKL_INT[this->dim + 1];
      memcpy(this->ia, ia + start_row, (this->a_nrows + 1) * sizeof(MKL_INT));
      for (FBLAS_UINT i = 1; i <= this->a_nrows; i++) {
        this->ia[i] -= this->ia[0];
      }
      this->ia[0] = 0;
      // pad remaining rows as empty so the matrix is square of size `dim`
      for (FBLAS_UINT i = this->a_nrows + 1; i <= this->dim; i++) {
        this->ia[i] = this->ia[this->a_nrows];
      }

      this->in = v_in;
      this->out = v_out + start_row;

      // add reads
      this->nnzs = this->ia[this->dim] - this->ia[0];
      StrideInfo sinfo;
      sinfo.stride = 0;
      sinfo.n_strides = 1;
      sinfo.len_per_stride = this->nnzs * sizeof(FPTYPE);
      this->add_read(this->a, sinfo);
      sinfo.len_per_stride = this->nnzs * sizeof(MKL_INT);
      this->add_read(this->ja, sinfo);
    }

    void execute() {
      // staged in-memory copies of the flash-resident CSR arrays
      MKL_INT* ja_ptr = (MKL_INT*) this->in_mem_ptrs[this->ja];
      FPTYPE*  a_ptr = (FPTYPE*) this->in_mem_ptrs[this->a];
      FPTYPE*  v_out = nullptr;
      // when padded (dim > a_nrows), mkl_csrgemv writes `dim` outputs,
      // so use a scratch buffer and copy back only the real rows
      if (this->dim > this->a_nrows) {
        v_out = new FPTYPE[this->dim];
      } else {
        v_out = this->out;
      }
      // MKL parameters;
      char    transa = 'N';
      MKL_INT m = this->dim;

      // execute MKL call
      mkl_csrgemv(&transa, &m, a_ptr, this->ia, ja_ptr, this->in, v_out);
      if (this->dim > this->a_nrows) {
        memcpy(this->out, v_out, this->a_nrows * sizeof(FPTYPE));
        delete[] v_out;
      }

      // free memory for ia
      delete[] this->ia;
    }

    // Approximate memory footprint of the task in bytes (CSR arrays plus
    // scratch output when padding is in effect).
    FBLAS_UINT size() {
      if (this->dim > this->a_nrows) {
        return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE)) +
                (this->dim + this->a_nrows) * sizeof(FPTYPE));
      } else {
        return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE)) +
                (this->a_nrows * sizeof(FPTYPE)));
      }
    }
  };

  // Task computing out += A^T * in_block for a horizontal block of rows of a
  // CSR matrix A. Because every row block contributes to the whole output
  // vector, the accumulation into `out` is serialized with a shared mutex.
  class CsrGemvTransInMem : public BaseTask {
    // matrix specs
    MKL_INT*           ia;   // local row-offset array, rebased to 0 (owned; freed in execute())
    flash_ptr<MKL_INT> ja;   // column indices of this row block (on flash)
    flash_ptr<FPTYPE>  a;    // nonzero values of this row block (on flash)
    FBLAS_UINT         blk_size;  // number of rows in this block
    FBLAS_UINT         a_rows;    // full matrix row count
    FBLAS_UINT         a_cols;    // full matrix column count
    FBLAS_UINT         dim;       // padded dimension = max(a_rows, a_cols)
    FBLAS_UINT         nnzs;      // number of nonzeros in this block

    // `atomic` access to output array
    std::mutex& mut;

    // vector specs
    FPTYPE* in;   // input slice for this block (v_in + start_row)
    FPTYPE* out;  // full shared output vector (length a_cols)

   public:
    CsrGemvTransInMem(FBLAS_UINT start_row, FBLAS_UINT a_rows,
                      FBLAS_UINT a_cols, FBLAS_UINT a_rblk_size, MKL_INT* ia,
                      flash_ptr<MKL_INT> ja, flash_ptr<FPTYPE> a, FPTYPE* v_in,
                      FPTYPE* v_out, std::mutex& sync_mut)
        : mut(std::ref(sync_mut)) {
      // matrix specs
      this->blk_size = std::min(a_rows - start_row, a_rblk_size);
      this->dim = std::max(a_rows, a_cols);
      // advance flash pointers to the first nonzero of this row block
      this->ja = ja + ia[start_row];
      this->a = a + ia[start_row];

      // copy over offsets and remove start offset
      this->ia = new MKL_INT[this->dim + 1];
      memcpy(this->ia, ia + start_row, (this->blk_size + 1) * sizeof(MKL_INT));
      for (FBLAS_UINT i = 1; i <= this->blk_size; i++) {
        this->ia[i] -= this->ia[0];
      }
      this->ia[0] = 0;
      // pad remaining rows as empty so the matrix is square of size `dim`
      for (FBLAS_UINT i = this->blk_size + 1; i <= this->dim; i++) {
        this->ia[i] = this->ia[this->blk_size];
      }

      this->in = v_in + start_row;
      this->out = v_out;
      this->a_rows = a_rows;
      this->a_cols = a_cols;

      // add reads
      this->nnzs = this->ia[this->dim] - this->ia[0];
      StrideInfo sinfo;
      sinfo.stride = 0;
      sinfo.n_strides = 1;
      sinfo.len_per_stride = this->nnzs * sizeof(FPTYPE);
      this->add_read(this->a, sinfo);
      sinfo.len_per_stride = this->nnzs * sizeof(MKL_INT);
      this->add_read(this->ja, sinfo);
    }

    void execute() {
      MKL_INT* ja_ptr = (MKL_INT*) this->in_mem_ptrs[this->ja];
      FPTYPE*  a_ptr = (FPTYPE*) this->in_mem_ptrs[this->a];

      // prepare MKL parameters;
      char    transa = 'T';
      MKL_INT m = (MKL_INT) this->dim;
      // zero-padded scratch input/output of the padded dimension:
      // only the first blk_size entries of v_in carry real data
      FPTYPE* v_out = new FPTYPE[this->dim];
      memset(v_out, 0, this->dim * sizeof(FPTYPE));
      FPTYPE* v_in = new FPTYPE[this->dim];
      memset(v_in, 0, this->dim * sizeof(FPTYPE));
      memcpy(v_in, this->in, this->blk_size * sizeof(FPTYPE));

      // execute MKL call
      mkl_csrgemv(&transa, &m, a_ptr, this->ia, ja_ptr, v_in, v_out);
      delete[] this->ia;
      delete[] v_in;

      // lock and add to existing result
      {
        std::unique_lock<std::mutex> lk(this->mut);
#pragma omp parallel for
        for (FBLAS_UINT i = 0; i < this->a_cols; i++) {
          this->out[i] += v_out[i];
        }
      }
      delete[] v_out;
    }

    // Approximate memory footprint of the task in bytes.
    FBLAS_UINT size() {
      return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE))) +
             (this->dim * (sizeof(FPTYPE) + sizeof(MKL_INT))) +
             (this->dim > this->blk_size ? this->dim * sizeof(FPTYPE) : 0);
    }
  };
}  // namespace flash
omp-taskloop-single.c
#include <omp.h> #include <unistd.h> #include <stdio.h> #define THREADS 4 #define LEN 5 int main(void) { int j=0; #pragma omp parallel num_threads(THREADS) { #pragma omp single nowait { #pragma omp taskloop nogroup for (j=0; j<LEN; j++) { usleep(50); } #pragma omp taskwait #pragma omp taskloop nogroup for (j=0; j<LEN; j++) { usleep(50); } #pragma omp taskwait } #pragma omp for nowait for (j=0; j<LEN; j++) { #pragma omp task usleep(50); } #pragma omp taskwait } return 0; }
Range.h
#ifndef __APPROXFLOW_RANGE__ #define __APPROXFLOW_RANGE__ #include <functional> // #include <omp.h> namespace ApproxFlow { template<typename Type> class RangeType { private: Type _begin, _end; public: RangeType(); RangeType(Type begin, Type end); Type begin() const {return _begin; } Type end() const {return _end; } Type size() const {return _end - _begin; } Type length() const {return _end - _begin; } void foreach(std::function<void(Type)> func) const; void parfor(std::function<void(Type)> func) const; }; template<typename Type> RangeType<Type>::RangeType(): _begin(0), _end(0) { ; } template<typename Type> RangeType<Type>::RangeType(Type begin, Type end): _begin(begin), _end(end) { #ifdef __UTILITIES_DEBUG__ if(begin < 0 || end < 0) { std::cerr << "ERROR: RangeType::RangeType(Type begin, Type end) -> range must be positive. " << std::endl; exit(22); } if(begin > end) { std::cerr << "ERROR: RangeType::RangeType(Type begin, Type end) -> begin cannot be larger than end. " << std::endl; exit(22); } #endif ; } template<typename Type> void RangeType<Type>::foreach(std::function<void(Type)> func) const { for(Type idx = _begin; idx < _end; idx++) { func(idx); } } template<typename Type> void RangeType<Type>::parfor(std::function<void(Type)> func) const { #pragma omp parallel for for(Type idx = _begin; idx < _end; idx++) { func(idx); } } typedef RangeType<size_t> Range; } #endif
ocp_nlp_sqp.c
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_sqp.h"

// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"

// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"


/************************************************
 * options
 ************************************************/

// Bytes needed for the SQP options struct plus the nested NLP options.
acados_size_t ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_sqp_opts);

    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}



// Lays the SQP options struct (and the nested NLP options) into raw_memory.
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}



// Sets default SQP options (20 iterations, 1e-8 tolerances) and propagates
// the tolerances to the QP solver options.
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    // int ii;

    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;
    opts->initialize_t_slacks = 0;

    // overwrite default submodules opts
    // qp tolerance
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    return;
}



// Forwards the update to the nested NLP options.
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}



// Generic setter: fields prefixed "qp_" go to the QP submodule; all others
// are SQP-level options or fall through to the nested NLP options.
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (prefix up to the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        if (!strcmp(field, "qp_warm_start"))
        {
            // also keep a local copy, used to restore warm start after iter 0
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            // only 0 is valid for the SQP solver (RTI phases belong to sqp_rti)
            if (*rti_phase < 0 || *rti_phase > 0)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0\n");
                exit(1);
            }
            else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else if (!strcmp(field, "initialize_t_slacks"))
        {
            int* initialize_t_slacks = (int *) value;
            if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks);
                exit(1);
            }
            opts->initialize_t_slacks = *initialize_t_slacks;
        }
        else
        {
            // unknown here: delegate to the generic NLP options setter
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}



// Forwards a stage-wise option to the nested NLP options.
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);

    return;
}



/************************************************
 * memory
 ************************************************/

// Bytes needed for the SQP memory: struct + nested NLP memory + the
// (max_iter+1) x stat_n statistics table (+4 columns for external QP residuals).
acados_size_t ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_sqp_memory);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    int stat_m = opts->max_iter+1;
    int stat_n = 6;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 3*8;  // align

    make_int_multiple_of(8, &size);

    return size;
}



// Lays the SQP memory (nested NLP memory and statistics table) into raw_memory.
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);

    align_char_to(8, &c_ptr);

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    align_char_to(8, &c_ptr);

    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Bytes needed for the SQP workspace: struct + nested NLP workspace + a
// temporary QP in/out pair (+ QP residual storage when ext_qp_res is on).
acados_size_t ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    acados_size_t size = 0;

    // sqp
    size += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}



// Resolves the workspace sub-pointers inside the raw workspace block; must
// be called before the workspace is used (layout mirrors
// ocp_nlp_sqp_workspace_calculate_size).
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Main SQP solver entry point: wires module memories together, then iterates
// linearize -> residuals/convergence check -> regularize -> solve QP ->
// globalize (line search) -> update variables, up to opts->max_iter times.
// Returns ACADOS_SUCCESS, ACADOS_QP_FAILURE or ACADOS_MAXITER (also stored
// in mem->status).
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;

    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // zero timers
    double total_time = 0.0;
    double tmp_time;
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;
    mem->time_glob = 0.0;
    mem->time_sim = 0.0;
    mem->time_sim_la = 0.0;
    mem->time_sim_ad = 0.0;

    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere.
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // if (opts->initialize_t_slacks > 0) ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // main sqp loop
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        // linearizate NLP and update QP matrices
        acados_tic(&timer1);
        ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        mem->time_lin += acados_toc(&timer1);

#ifdef MEASURE_TIMINGS
        // get timings from integrator
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim", &tmp_time);
            mem->time_sim += tmp_time;
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_la", &tmp_time);
            mem->time_sim_la += tmp_time;
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_ad", &tmp_time);
            mem->time_sim_ad += tmp_time;
        }
#endif  // MEASURE_TIMINGS

        // update QP rhs for SQP (step prim var, abs dual var)
        ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

        // compute nlp residuals
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_mem->nlp_res, nlp_mem);
        // overall inf-norm residual = max over the four KKT residual norms
        nlp_out->inf_norm_res = nlp_mem->nlp_res->inf_norm_res_stat;
        nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_eq > nlp_out->inf_norm_res) ? nlp_mem->nlp_res->inf_norm_res_eq : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_ineq > nlp_out->inf_norm_res) ? nlp_mem->nlp_res->inf_norm_res_ineq : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_comp > nlp_out->inf_norm_res) ? nlp_mem->nlp_res->inf_norm_res_comp : nlp_out->inf_norm_res;

        if (opts->print_level > sqp_iter + 1)
            print_ocp_qp_in(nlp_mem->qp_in);

        // save statistics
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = nlp_mem->nlp_res->inf_norm_res_stat;
            mem->stat[mem->stat_n*sqp_iter+1] = nlp_mem->nlp_res->inf_norm_res_eq;
            mem->stat[mem->stat_n*sqp_iter+2] = nlp_mem->nlp_res->inf_norm_res_ineq;
            mem->stat[mem->stat_n*sqp_iter+3] = nlp_mem->nlp_res->inf_norm_res_comp;
        }

        // exit conditions on residuals
        if ((nlp_mem->nlp_res->inf_norm_res_stat < opts->tol_stat) &
            (nlp_mem->nlp_res->inf_norm_res_eq < opts->tol_eq) &
            (nlp_mem->nlp_res->inf_norm_res_ineq < opts->tol_ineq) &
            (nlp_mem->nlp_res->inf_norm_res_comp < opts->tol_comp))
        {
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;

#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;

            if (opts->print_level > 0)
            {
                printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
                printf("\n\n");
            }
            return mem->status;
        }

        // regularize Hessian
        acados_tic(&timer1);
        config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // (typically) no warm start at first iteration
        if (sqp_iter == 0 && !opts->warm_start_first_qp)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
        }

        // solve qp
        acados_tic(&timer1);
        qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        mem->time_qp_sol += acados_toc(&timer1);

        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
        mem->time_qp_solver_call += tmp_time;
        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
        mem->time_qp_xcond += tmp_time;

        // compute correct dual solution in case of Hessian regularization
        acados_tic(&timer1);
        config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // restore default warm start
        if (sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &opts->qp_warm_start);
        }

        // TODO move into QP solver memory ???
        qp_info *qp_info_;
        ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
        nlp_out->qp_iter = qp_info_->num_iter;
        // printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
        qp_iter = qp_info_->num_iter;

        // save statistics of last qp solver call
        if (sqp_iter+1 < mem->stat_m)
        {
            mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
            mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
        }

        // compute external QP residuals (for debugging)
        if (opts->ext_qp_res)
        {
            ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
        }

        // QP solver failed hard (MAXITER is tolerated and iteration continues)
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // print_ocp_qp_in(nlp_mem->qp_in);

            if (opts->print_level > 0)
            {
                printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
                printf("\n\n");
            }
            // increment sqp_iter to return full statistics and improve output below.
            sqp_iter++;

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;

#ifndef ACADOS_SILENT
            printf("\nQP solver returned error status %d in SQP iteration %d, QP iteration %d.\n", qp_status, sqp_iter, qp_iter);
#endif
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            if (opts->print_level > 1)
            {
                printf("\n Failed to solve the following QP:\n");
                if (opts->print_level > sqp_iter + 1)
                    print_ocp_qp_in(nlp_mem->qp_in);
            }

            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }

        // globalization
        acados_tic(&timer1);
        double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        mem->time_glob += acados_toc(&timer1);

        // update variables
        ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);

        // ocp_nlp_dims_print(nlp_out->dims);
        // ocp_nlp_out_print(nlp_out);
        // exit(1);

        // ??? @rien
        // for (int_t i = 0; i < N; i++)
        // {
        //     ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //     sim_opts *opts = dynamics_opts->sim_solver;
        //     if (opts->scheme == NULL)
        //         continue;
        //     opts->sens_adj = (opts->scheme->type != exact);
        //     if (nlp_in->freezeSens) {
        //         // freeze inexact sensitivities after first SQP iteration !!
        //         opts->scheme->freeze = true;
        //     }
        // }

        if (opts->print_level > 0)
        {
            if (sqp_iter%10 == 0)
            {
                printf("# it\tstat\t\teq\t\tineq\t\tcomp\n");
            }
            printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat, nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
        }
    }

    // stop timer
    total_time += acados_toc(&timer0);

    if (opts->print_level > 0)
        printf("\n\n");

    // ocp_nlp_out_print(nlp_out);

    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;

    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    // maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;
#ifndef ACADOS_SILENT
    printf("\n ocp_nlp_sqp: maximum iterations reached\n");
#endif

    return mem->status;
}



// Consistency checks (slack dimensions vs. constraint modules) plus per-stage
// dynamics precompute; sets the sampling time T in each dynamics model first.
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(all) add flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val);
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}



// Parametric sensitivity of the solution: zeroes the QP rhs, sets a unit
// perturbation on the requested field/index, re-solves via the QP solver's
// eval_sens and copies the resulting directional derivatives into
// sens_nlp_out. Currently only field "ex" at stage 0 is supported.
// NOTE(review): function is truncated at the end of this file chunk (inside
// the error printf of the unsupported-field branch).
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
    acados_timer timer0;
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    if ((!strcmp("ex", field)) & (stage==0))
    {
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);

        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;

        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at 
stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage); exit(1); } mem->time_solution_sensitivities = acados_toc(&timer0); return; } // TODO rename memory_get ??? void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = mem->sqp_iter; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_glob", field)) { double *value = return_value_; *value = mem->time_glob; } else if (!strcmp("time_solution_sensitivities", field)) { double *value = return_value_; *value = mem->time_solution_sensitivities; } else if (!strcmp("time_sim", field)) { double *value = return_value_; *value = mem->time_sim; } else if (!strcmp("time_sim_la", field)) { double *value = return_value_; *value = mem->time_sim_la; } else if (!strcmp("time_sim_ad", field)) { double *value = return_value_; *value = mem->time_sim_ad; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = mem->stat_m<mem->sqp_iter+1 ? 
mem->stat_m : mem->sqp_iter+1; double *value = return_value_; for (int ii=0; ii<n_row; ii++) { value[ii+0] = ii; for (int jj=0; jj<mem->stat_n; jj++) value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n]; } } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_mem->nlp_res; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_); } else if (!strcmp("res_stat", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_stat; } else if (!strcmp("res_eq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_eq; } else if (!strcmp("res_ineq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq; } else if (!strcmp("res_comp", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_comp; } else if (!strcmp("cost_value", field)) { double *value = return_value_; *value = mem->nlp_mem->cost_value; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field); exit(1); } } void 
ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; if (!strcmp("nlp_opts", field)) { void **value = return_value_; *value = opts->nlp_opts; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field); exit(1); } } void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_workspace *work = work_; if (!strcmp("nlp_work", field)) { void **value = return_value_; *value = work->nlp_work; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field); exit(1); } } void ocp_nlp_sqp_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_opts_update; config->opts_set = &ocp_nlp_sqp_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp; config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default; config->precompute = &ocp_nlp_sqp_precompute; config->get = &ocp_nlp_sqp_get; config->opts_get = &ocp_nlp_sqp_opts_get; config->work_get = &ocp_nlp_sqp_work_get; return; }
/* ===== begin concatenated file: bfm_mixed_solver.h ===== */
// -*- mode:c++; c-basic-offset: 4 -*- #ifndef INCLUDED_BFM_MIXED_SOLVER_HT_H #define INCLUDED_BFM_MIXED_SOLVER_HT_H #include <bfm.h> #include <util/lattice/bfm_evo.h> #include <alg/enum_int.h> #include <omp.h> #include <pthread.h> #include <cstdio> #include <cstdlib> #include <unistd.h> #include <sys/stat.h> #include <sys/time.h> namespace mixed_cg { // check if 2 instances of bfm agree on what they are going to do. template < typename Float_out, typename Float_in > inline bool check (bfm_evo < Float_out > &bfm_out, bfm_evo < Float_in > &bfm_in) { if (bfm_out.node_latt[0] != bfm_in.node_latt[0]) return false; if (bfm_out.node_latt[1] != bfm_in.node_latt[1]) return false; if (bfm_out.node_latt[2] != bfm_in.node_latt[2]) return false; if (bfm_out.node_latt[3] != bfm_in.node_latt[3]) return false; if (bfm_out.Ls != bfm_in.Ls) return false; if (bfm_out.precon_5d != bfm_in.precon_5d) return false; return true; } // Convert between single/double precision bfm fermions template < typename Float_out, typename Float_in > inline void threaded_convFermion (Fermion_t out, Fermion_t in, bfm_evo < Float_out > &bfm_out, bfm_evo < Float_in > &bfm_in) { int me = bfm_out.thread_barrier (); if (!check (bfm_out, bfm_in)) { if (bfm_out.isBoss () && !me) { printf ("Output/Input fermions don't match.\n"); } exit (-1); } // Simple copy, this shouldn't be called, right? if (sizeof (Float_out) == sizeof (Float_in)) { bfm_out.copy (out, in); return; } // otherwise, we do the conversion // // Note: this function is running under threaded environment. 
int Nspinco = 12; int out_i_inc = bfm_out.simd () * 2; int in_i_inc = bfm_in.simd () * 2; int out_lat[5] = { bfm_out.node_latt[0], bfm_out.node_latt[1], bfm_out.node_latt[2], bfm_out.node_latt[3], bfm_out.Ls }; int vol5d_out = out_lat[0] * out_lat[1] * out_lat[2] * out_lat[3] * out_lat[4]; int thrlen, throff; bfm_out.thread_work_nobarrier (vol5d_out, me, thrlen, throff); Float_out *outf = (Float_out *) out; Float_in *inf = (Float_in *) in; for (int site = throff; site < throff + thrlen; ++site) { int x[4], s; int si = site; x[0] = si % out_lat[0]; si = si / out_lat[0]; x[1] = si % out_lat[1]; si = si / out_lat[1]; x[2] = si % out_lat[2]; si = si / out_lat[2]; x[3] = si % out_lat[3]; s = si / out_lat[3]; // both in and out must have the same preconditioning scheme. int sp = bfm_out.precon_5d ? s : 0; if ((x[0] + x[1] + x[2] + x[3] + sp & 0x1) == 1) { if (!cps::GJP.Gparity ()) { int out_base = bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1); int in_base = bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1); for (int co = 0; co < Nspinco; co++) { for (int reim = 0; reim < 2; reim++) { int out_id = out_base + reim + co * out_i_inc; int in_id = in_base + reim + co * in_i_inc; outf[out_id] = inf[in_id]; }} //co,reim } else { #ifndef BFM_GPARITY printf("Compiled with BFM without Gparity\n");exit(-43); #else //G-parity checkerboard ordering stacks the second flavour after the first on each checkerboard : cb0[f0 f1]cb1[f0 f1] #ifdef BFM_GPARITY int out_base[2] = { bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 0), bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 1) }; int in_base[2] = { bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 0), bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 1) }; # else int out_base[2] = { bfm_out.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 0), bfm_out.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 1) }; int in_base[2] = { bfm_in.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 0), bfm_in.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 1) }; # endif for (int flav = 0; flav < 2; 
flav++) { for (int co = 0; co < Nspinco; co++) { for (int reim = 0; reim < 2; reim++) { int out_id = out_base[flav] + reim + co * out_i_inc; int in_id = in_base[flav] + reim + co * in_i_inc; outf[out_id] = inf[in_id]; }} //co,reim } //flav #endif } } //cb } //xyzts } // Convert between single/double precision bfm fermions // CK: And do it quickly! The original takes as long as an Mprec! template < typename Float_out, typename Float_in > inline void threaded_convFermion_fast (Fermion_t out, Fermion_t in, bfm_evo < Float_out > &bfm_out, bfm_evo < Float_in > &bfm_in) { // Simple copy, this shouldn't be called, right? if (sizeof (Float_out) == sizeof (Float_in)) { return bfm_out.copy (out, in); } #if 0 const static int nspinco = 12; int me, thrlen, throff; int work = (bfm_out.gparity ? 2 : 1) * bfm_out.cbLs * bfm_out.simd_cbvol * bfm_out.nsimd * nspinco * 2; bfm_out.thread_work (work, me, thrlen, throff); Float_in *x = (Float_in *) in + throff; Float_out *y = (Float_out *) out + throff; for (int s = 0; s < thrlen; ++s) y[s] = x[s]; bfm_out.thread_barrier (); #else //Use bfm-3.2 (imported) precisionChange method if (sizeof (Float_out) == sizeof (double)) bfm_out.precisionChange (in, out, SingleToDouble, 0); else bfm_out.precisionChange (in, out, DoubleToSingle, 0); #endif } // Reinitialize communication subsystem. // Check bfmcommspi.C in the bfm package to see if this can be avoided. template < typename Float_new, typename Float_old > inline void switch_comm (bfm_evo < Float_new > &bfm_new, bfm_evo < Float_old > &bfm_old) { if (static_cast < void *>(&bfm_new) == static_cast < void *>(&bfm_old)) return; int me = bfm_old.thread_barrier (); if (!me) { bfm_old.comm_end (); bfm_new.comm_init (); // Question: how do we propagate the reinitialized information // to other threads? // Answer: thread barrier does this. } bfm_new.thread_barrier (); } // Both sol and src are double precision fermions. Single precision // solver is only used internally. 
// // Things to be set before using this function: // // double precision solver mass, stopping condition, max iteration // number. // // single precision solver mass, stopping condition, max iteration // number. // // Gauge field must be initialized for both double and single prec // solvers. // // the communication subsystem must be ready for bfm_d to use (due to // the way bfmcommspi is written, one must reinitialize the // communication object when switching between single and double // precisions). // // max_cycle: the maximum number of restarts will be performed. // N is the number of low modes removed (subtracted) from the final solution. All evecs will be used for the deflated solve, this just makes the solution a 'high-mode' solution for use in A2A propagators. // //EIGENVECTORS SHOULD BE SINGLE PRECISION! inline int threaded_cg_mixed_MdagM (Fermion_t sol, Fermion_t src, bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f, int max_cycle, cps::InverterType itype = cps::CG, // the following parameters are for deflation multi1d < Fermion_t[2] > *evec = NULL, multi1d < float >*eval = NULL, int N = 0) { int me = bfm_d.thread_barrier (); if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MdagM: bfm_d.CGdiagonalMee = %d, bfm_f.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee, bfm_f.CGdiagonalMee); } double frsd = bfm_f.residual; Fermion_t src_d = bfm_d.threadedAllocFermion (); Fermion_t tv1_d = bfm_d.threadedAllocFermion (); Fermion_t tv2_d = bfm_d.threadedAllocFermion (); Fermion_t sol_f = bfm_f.threadedAllocFermion (); Fermion_t src_f = bfm_f.threadedAllocFermion (); double src_norm = bfm_d.norm (src); double stop = src_norm * bfm_d.residual * bfm_d.residual; if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MdagM: src_norm = %17.10e\n", src_norm); } int iter = 0; for (int i = 0; i < max_cycle; ++i) { // compute double precision rsd and also new RHS vector. 
bfm_d.Mprec (sol, tv1_d, src_d, 0, 0); bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0); // tv2latt_fbfm.bf._d = MdagM * sol double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.); if (bfm_f.isBoss () && !me) { printf ("CPS cg_mixed_MdagM: iter = %d rsd = %17.10e(d) stop = %17.10e(d)\n", i, norm, stop); } // my ad hoc stopping condition if ((i < (max_cycle - 1)) && (norm < 100. * stop)) break; // will this cause a deadlock when combined with the // condition above? i.e., will we lose a factor of huge // factor in the accuracy of rsd when converting from // single to double? if (!me) while (norm * bfm_f.residual * bfm_f.residual < stop) bfm_f.residual *= 2; //bfm_d.thread_barrier(); //not needed as next line has a barrier at the beginning // bfm_f.residual = sqrt(stop/norm); threaded_convFermion (src_f, src_d, bfm_f, bfm_d); switch_comm (bfm_f, bfm_d); bfm_f.set_zero (sol_f); switch (itype) { case cps::CG: if (evec && eval && (*eval).size () > 0) { //CK: NOTE it is the single-precision bfm instance doing the deflation. All of its linalg assumes then single precision fermions, *including the eigenvectors* if (bfm_f.isBoss () && !me) printf ("bfm_evo::deflating with %d eigen vectors.\n", (*eval).size ()); bfm_f.deflate (sol_f, src_f, evec, eval, (*eval).size ()); } iter += bfm_f.CGNE_prec_MdagM (sol_f, src_f); break; case cps::EIGCG: iter += bfm_f.Eig_CGNE_prec (sol_f, src_f); break; default: if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MdagM: unsupported inverter type.\n"); } exit (-1); } switch_comm (bfm_d, bfm_f); threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f); bfm_d.axpy (sol, tv1_d, sol, 1.); } iter += bfm_d.CGNE_prec_MdagM (sol, src); if (N > 0) { // Subtract N low modes from final sol, usually for all to all propagators. // TODO is it legal to only use the single precision eval? 
threaded_convFermion (src_f, src, bfm_f, bfm_d); bfm_f.deflate (sol_f, src_f, evec, eval, N); threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f); bfm_d.axpy (sol, tv1_d, sol, -1.); } bfm_d.threadedFreeFermion (src_d); bfm_d.threadedFreeFermion (tv1_d); bfm_d.threadedFreeFermion (tv2_d); bfm_f.threadedFreeFermion (sol_f); bfm_f.threadedFreeFermion (src_f); double sol_norm = bfm_d.norm (sol); if (bfm_d.isBoss () && !me) { printf ("cg_mixed_MdagM: final sol norm = %17.10e ; final iter count = %d\n", sol_norm, iter); } bfm_f.residual = frsd; return iter; } // Not implemented for older BFM //#ifdef BFM_GPARITY #if 1 inline int threaded_cg_mixed_MMdag (Fermion_t sol, Fermion_t src, bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f, int max_cycle, cps::InverterType itype = cps::CG, // the following parameters are for deflation multi1d < Fermion_t[2] > *evec = NULL, multi1d < float >*eval = NULL, int N = 0) { int me = bfm_d.thread_barrier (); if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MMdag: bfm_d.CGdiagonalMee = %d, bfm_f.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee, bfm_f.CGdiagonalMee); } double frsd = bfm_f.residual; Fermion_t src_d = bfm_d.threadedAllocFermion (); Fermion_t tv1_d = bfm_d.threadedAllocFermion (); Fermion_t tv2_d = bfm_d.threadedAllocFermion (); Fermion_t sol_f = bfm_f.threadedAllocFermion (); Fermion_t src_f = bfm_f.threadedAllocFermion (); double src_norm = bfm_d.norm (src); double stop = src_norm * bfm_d.residual * bfm_d.residual; if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MMdag: src_norm = %17.10e\n", src_norm); } int iter = 0; for (int i = 0; i < max_cycle; ++i) { // compute double precision rsd and also new RHS vector. 
bfm_d.Mprec (sol, tv1_d, src_d, 1, 0); bfm_d.Mprec (tv1_d, tv2_d, src_d, 0, 0); // tv2_d = MMdag * sol double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.); if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MMdag: iter = %d rsd = %17.10e(d) stop = %17.10e(d)\n", i, norm, stop); } // my ad hoc stopping condition if (norm < 100. * stop) break; // will this cause a deadlock when combined with the // condition above? i.e., will we lose a factor of huge // factor in the accuracy of rsd when converting from // single to double? while (norm * bfm_f.residual * bfm_f.residual < stop) bfm_f.residual *= 2; threaded_convFermion (src_f, src_d, bfm_f, bfm_d); switch_comm (bfm_f, bfm_d); bfm_f.set_zero (sol_f); switch (itype) { case cps::CG: if (evec && eval && N) { bfm_f.deflate (sol_f, src_f, evec, eval, N); } iter += bfm_f.CGNE_prec_MMdag (sol_f, src_f); break; /*case cps::EIGCG: iter += bfm_f.Eig_CGNE_prec(sol_f, src_f); break; */ default: if (bfm_f.isBoss () && !me) { printf ("cg_mixed_MMdag: unsupported inverter type.\n"); } exit (-1); } switch_comm (bfm_d, bfm_f); threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f); bfm_d.axpy (sol, tv1_d, sol, 1.); } bfm_d.threadedFreeFermion (src_d); bfm_d.threadedFreeFermion (tv1_d); bfm_d.threadedFreeFermion (tv2_d); bfm_f.threadedFreeFermion (sol_f); bfm_f.threadedFreeFermion (src_f); iter += bfm_d.CGNE_prec_MMdag (sol, src); double sol_norm = bfm_d.norm (sol); if (bfm_d.isBoss () && !me) { printf ("cg_mixed_MMdag: final sol norm = %17.10e ; final iter count = %d\n", sol_norm, iter); } bfm_f.residual = frsd; bfm_f.thread_barrier (); //make sure no threads are waiting to write to bfm_f.residual while others have moved onto something else that potentially changes bfm_f.residual return iter; } #endif //#ifdef BFM_GPARITY // apply single precision solver to double precision vectors. Both // sol_d and src_d are in double precision. // // sol_f and src_f are auxiliary fermions in single // precision. 
Their content will be overriden after calling this // function. // // If import_guess == true, then we import sol_d as an initial // guess, other we do a zero start CG. inline int cg_single_prec (Fermion_t sol_d, Fermion_t src_d, Fermion_t sol_f, Fermion_t src_f, bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f) { threaded_convFermion (src_f, src_d, bfm_f, bfm_d); switch_comm (bfm_f, bfm_d); bfm_f.set_zero (sol_f); int iter = bfm_f.CGNE_prec_MdagM (sol_f, src_f); switch_comm (bfm_d, bfm_f); threaded_convFermion (sol_d, sol_f, bfm_d, bfm_f); return iter; } // cg_MdagM_single_precnd: Nested CG, single precision solver is // used as a preconditioner. // // Calling interface is the same as threaded_cg_mixed_MdagM(). inline int cg_MdagM_single_precnd (Fermion_t sol, Fermion_t src, bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f) { int me = bfm_d.thread_barrier (); double frsd = bfm_f.residual; Fermion_t r = bfm_d.threadedAllocFermion (); Fermion_t minvr = bfm_d.threadedAllocFermion (); Fermion_t d = bfm_d.threadedAllocFermion (); Fermion_t ad = bfm_d.threadedAllocFermion (); Fermion_t aad = bfm_d.threadedAllocFermion (); Fermion_t tv1 = bfm_d.threadedAllocFermion (); Fermion_t sol_f = bfm_f.threadedAllocFermion (); Fermion_t src_f = bfm_f.threadedAllocFermion (); const double src_norm = bfm_d.norm (src); const double stop = src_norm * bfm_d.residual * bfm_d.residual; int iter_s = 0; bfm_d.Mprec (sol, ad, tv1, 0, 0); bfm_d.Mprec (ad, aad, tv1, 1, 0); // aad = MdagM * x0 (double prec) bfm_d.axpy (r, aad, src, -1.0); // r0 = b - MdagM * x0 iter_s += cg_single_prec (minvr, r, sol_f, src_f, bfm_d, bfm_f); bfm_d.copy (d, minvr); // d0 = (M'dagM')^(-1) * r0 double rtminvr = bfm_d.inner_real (r, minvr); int k = 1; for (; k <= bfm_d.max_iter; ++k) { double dtad = bfm_d.Mprec (d, ad, tv1, 0, 1); bfm_d.Mprec (ad, aad, tv1, 1, 0); // aad = MdagM * d[k] (double prec) double alpha = rtminvr / dtad; bfm_d.axpy (sol, d, sol, alpha); double rsd = bfm_d.axpy_norm (r, aad, r, -alpha); 
// check watch file FILE *fp = fopen ("stop.file", "r"); if (fp) { fclose (fp); printf ("Found watchfile stop.file\n"); return 1; } // check stopping condition if (rsd < stop) { // compute true residual bfm_d.Mprec (sol, ad, tv1, 0, 0); bfm_d.Mprec (ad, aad, tv1, 1, 0); double true_rsd = bfm_d.axpy_norm (tv1, aad, src, -1.0); if (bfm_d.isBoss () && !me) { printf ("cg_MdagM_single_precnd: converged in %d(d)+%d(s) iterations.\n", k, iter_s); printf ("cg_MdagM_single_precnd: true residual = %17.10e.\n", sqrt (true_rsd / src_norm)); } break; } iter_s += cg_single_prec (minvr, r, sol_f, src_f, bfm_d, bfm_f); double tmp = bfm_d.inner_real (r, minvr); double beta = tmp / rtminvr; rtminvr = tmp; bfm_d.axpby (d, minvr, d, 1.0, beta); } if (k > bfm_d.max_iter) { if (bfm_d.isBoss () && !me) { printf ("cg_MdagM_single_precnd: CG not converged in %d(d)+%d(s) iterations.\n", k, iter_s); } } bfm_d.threadedFreeFermion (r); bfm_d.threadedFreeFermion (minvr); bfm_d.threadedFreeFermion (d); bfm_d.threadedFreeFermion (ad); bfm_d.threadedFreeFermion (aad); bfm_d.threadedFreeFermion (tv1); bfm_f.threadedFreeFermion (sol_f); bfm_f.threadedFreeFermion (src_f); bfm_f.residual = frsd; //bfm_f.thread_barrier(); //no need, barrier in next call bfm_d.CGNE_prec_MdagM (sol, src); return k + iter_s; } inline int threaded_cg_mixed_M (Fermion_t sol[2], Fermion_t src[2], bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f, int max_cycle, cps::InverterType itype = cps::CG, // the following parameters are for deflation multi1d < Fermion_t[2] > *evec = NULL, multi1d < float >*eval = NULL, int N = 0) { int me = bfm_d.thread_barrier (); Fermion_t be = bfm_d.threadedAllocFermion (); Fermion_t bo = bfm_d.threadedAllocFermion (); Fermion_t ta = bfm_d.threadedAllocFermion (); Fermion_t tb = bfm_d.threadedAllocFermion (); double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]); if (bfm_d.isBoss () && !me) { printf ("threaded_cg_mixed_M: source norm is %17.10e\n", nsrc); } // eo preconditioning bfm_d.MooeeInv 
(src[Even], ta, DaggerNo); bfm_d.Meo (ta, tb, Odd, DaggerNo); // tb == Moe Mee^{-1} src[e] bfm_d.axpy (ta, tb, src[Odd], -1.0); bfm_d.Mprec (ta, bo, tb, DaggerYes); // bo = Mprec^dag (src[o] - Moe Mee^{-1} src[e]) int iter = threaded_cg_mixed_MdagM (sol[Odd], bo, bfm_d, bfm_f, max_cycle, itype, evec, eval, N); bfm_d.Meo (sol[Odd], ta, Even, DaggerNo); bfm_d.axpy (tb, ta, src[Even], -1.0); bfm_d.MooeeInv (tb, sol[Even], DaggerNo); double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]); // compute final residual Fermion_t tmp[2] = { be, bo }; bfm_d.Munprec (sol, tmp, ta, DaggerNo); double ndiff = 0.; for (int i = 0; i < 2; ++i) { bfm_d.axpy (tb, tmp[i], src[i], -1.0); ndiff += bfm_d.norm (tb); } if (bfm_d.isBoss () && !me) { printf ("threaded_cg_mixed_M: unprec sol norm = %17.10e, residual = %17.10e\n", nsol, sqrt (ndiff / nsrc)); } bfm_d.threadedFreeFermion (be); bfm_d.threadedFreeFermion (bo); bfm_d.threadedFreeFermion (ta); bfm_d.threadedFreeFermion (tb); return iter; } inline double sigma_sum_recurse (const int &i, const int &start, const int &N, double shifts[], const int &nprod_remaining) { double out = 0.0; for (int j = start; j < N; j++) { if (j == i) continue; //skip i double toadd = shifts[j]; if (nprod_remaining > 1) { toadd *= sigma_sum_recurse (i, j + 1, N, shifts, nprod_remaining - 1); } out += toadd; } return out; } inline double sigma_prod (const int &i, const int &n, const int &N, double shifts[]) { if (n == N - 1) return 1.0; int prod_size = N - 1 - n; return sigma_sum_recurse (i, 0, N, shifts, prod_size); } /*CK: Implementation of multi-shift with guesses from J.~C.~Osborn, ``Initial guesses for multi-shift solvers,'' PoS LATTICE {\bf 2008} (2008) 029 [arXiv:0810.1081 [hep-lat]]. 
*/
//------------------------------------------------------------------------------
// CGNE_prec_MdagM_multi_shift_with_guesses
//
// Multi-shift CG on the preconditioned normal equations,
//   (MdagM + mass[i]) psi[i] = src,   i = 0..nshift-1,
// seeded with per-shift initial guesses, following J.C. Osborn, "Initial
// guesses for multi-shift solvers", PoS LATTICE 2008 029 [arXiv:0810.1081].
//
// The guesses are combined into a single vector w lying in a Krylov space
// common to all shifts; per-shift guess contributions y[i] and a corrected
// source r are built from w, the standard multi-shift solver is run on r,
// and the y[i] are added back onto the solutions psi[i].
//
//   psi[]        : output solutions, one per shift
//   src          : common source vector
//   guesses[]    : initial guess per shift
//   mass[]       : the shifts
//   alpha[]      : per-shift scaling forwarded to the multi-shift solver
//   nshift       : number of shifts
//   mresidual_in : per-shift target residuals (rescaled internally because the
//                  solve runs against the new source r, whose norm differs)
//   single       : if nonzero, sum all solutions into psi[0] before returning
//   bfm          : operator/solver instance; all threads enter together
//
// Returns the iteration count of the underlying CGNE_prec_MdagM_multi_shift.
//------------------------------------------------------------------------------
template < typename Float >
  int CGNE_prec_MdagM_multi_shift_with_guesses (Fermion_t psi[], Fermion_t src,
                                                Fermion_t guesses[],
                                                double mass[], double alpha[],
                                                int nshift,
                                                const double mresidual_in[],
                                                int single,
                                                bfm_evo < Float > &bfm)
{
  //'single' appears to sum all the solutions into psi[0]
  int me = bfm.thread_barrier ();

  //Threads take a local copy of mresidual_in[] so no worries about race conditions
  double mresidual[nshift];
  for (int i = 0; i < nshift; ++i)
    mresidual[i] = mresidual_in[i];

  //Form combination of guesses that results in a new source y in the common Krylov space
  // w = \sum_i c_i guess_i where c_i = \Prod_{j!=i} 1/(shift[j]-shift[i])
  Fermion_t w = bfm.threadedAllocFermion ();
  bfm.set_zero (w);
  for (int i = 0; i < nshift; i++) {
    double c_i = 1.0;
    for (int j = 0; j < nshift; j++) {
      if (j == i)
        continue;
      c_i *= 1.0 / (mass[j] - mass[i]);
    }
    bfm.axpy (w, guesses[i], w, c_i);
    if (bfm.isBoss () && (!me))
      printf ("CGNE_prec_MdagM_multi_shift_with_guesses: c_%d = %e\n", i, c_i);
  }
  if (bfm.isBoss () && (!me))
    printf ("CGNE_prec_MdagM_multi_shift_with_guesses: w norm = %e\n",
            bfm.norm (w));

  //Form y_i = Prod_{j!=i} (MMdag + shift[j]) w   -- these are all in the same Krylov space
  Fermion_t y[nshift];
  for (int i = 0; i < nshift; i++) {
    y[i] = bfm.threadedAllocFermion (mem_fast);
    bfm.set_zero (y[i]);
  }

  //We can multiply out. Let MMdag = B
  //y_i = Prod_{j!=i, 0<=j<=N-1} (MMdag + shift[j]) w
  //    = B^{N-1}w
  //    + (sum_{j!=i, j<=N-1} shift[j])B^{N-2}w
  //    + sum_{j!=i, j<=N-2} shift[j] * ( sum_{k!=i,k>j, k<=N-1} shift[k] ) B^{N-3}w
  //    +...
  //The coefficient of a term B^n is the sum of all the unique products of (N-1)-n elements
  //of the set of shifts excluding shift[i]:
  //  { shift[0], shift[1],.. excl shift[i].... shift[N-2], shift[N-1] }
  //We take a product of size zero to have value 1.0 (as we would with factorials).
  //Indices all start from 0 and end at N-1.
  //For example
  //n=N-1 has coefficient 1.0, which is the product of (N-1)-(N-1)=0 shifts
  //n=N-2 is the sum of all products comprising 1 shift: sum_{j!=i, j<=N-1} shift[j]
  //n=N-3 is the sum of all products of 2 shifts:
  //  shift[0]*(shift[1] + shift[2] + ... shift[N-1]) + shift[1]*(shift[2] + ... shift[N-1])
  //  + ... + shift[N-2]*shift[N-1]
  //  = sum_{j!=i, j<=N-2} shift[j] * ( sum_{k!=i,k>j;k<=N-1} shift[k] )
  //etc...
  //n=0 is the product of all shifts bar i
  //Note, we contain the running product of MMdag in w: w_n+1 = MMdag w_n, defining w_0 = w
  Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
  Fermion_t tmp2 = bfm.threadedAllocFermion (mem_fast);
  for (int n = 0; n < nshift; n++) { //last term MMdag^{nshift-1}
    //At this point w = MMdag^n w_0
    for (int i = 0; i < nshift; i++) {
      double coeff_i = sigma_prod (i, n, nshift, mass);
      if (bfm.isBoss () && (!me))
        printf ("CGNE_prec_MdagM_multi_shift_with_guesses: n=%d i=%d sum_prod=%e \n",
                n, i, coeff_i);
      bfm.axpy (y[i], w, y[i], coeff_i); // y[i] += coeff[i] * MMdag^n w
    }
    bfm.Mprec (w, tmp, tmp2, DaggerNo);  //tmp = M w
    bfm.Mprec (tmp, w, tmp2, DaggerYes); //w = Mdag tmp
  }

  Fermion_t r = bfm.threadedAllocFermion (mem_fast);
  //Also need r = src - Prod_j (MMdag + shift[j]) w
  //            = src - (MMdag + shift[i]) Prod_{j!=i, j<=N-1} (MMdag + shift[j]) w  for any i
  //Use i=N-1-1:
  //            = src - (MMdag + shift[N-1]) Prod_{j!=i, j<=N-1} (MMdag + shift[j]) w
  //            = src - (MMdag + shift[N-1]) y[N-1]
  bfm.Mprec (y[nshift - 1], tmp, tmp2, DaggerNo);
  bfm.Mprec (tmp, r, tmp2, DaggerYes); //after this, r = MMdag y[N-1]
  bfm.axpy (r, y[nshift - 1], r, mass[nshift - 1]); // r = (MMdag + shift[N-1])y[N-1]
  bfm.axpy (r, r, src, -1.0); // r = src - (MMdag + shift[N-1])y[N-1]

  //run standard multi-shift with r as the source
  //To achieve the desired residual on the solution we need to modify the residuals to
  //reflect the difference in source norm.
  //In the multi-mass solve we are trying to emulate:
  //|resid|^2 = |orig src|^2 * (orig mresidual)^2
  //Keeping this fixed, we want
  //(new mresidual) = sqrt( |orig src|^2 * (orig mresidual)^2 / |new src|^2 )
  Float orig_src_norm2 = bfm.norm (src);
  Float new_src_norm2 = bfm.norm (r);
  for (int i = 0; i < nshift; i++)
    mresidual[i] *= sqrt (orig_src_norm2 / new_src_norm2);

  int iter = bfm.CGNE_prec_MdagM_multi_shift (psi, r, mass, alpha, nshift,
                                              mresidual, 0);

  //Add the solutions to the initial guess vectors y
  for (int n = 0; n < nshift; n++)
    bfm.axpy (psi[n], psi[n], y[n], 1.0);

  // Check answers: print the true residual of each shifted system
  if (bfm.isBoss () && (!me))
    printf ("bfm::CGNE_prec_MdagM_multi_shift_with_guesses: Checking solutions\n");
  for (int s = 0; s < nshift; s++) {
    bfm.Mprec (psi[s], tmp, tmp2, DaggerNo);
    bfm.Mprec (tmp, w, tmp2, DaggerYes); //reuse w, now w=MMdag psi[s]
    bfm.axpy (w, psi[s], w, mass[s]); // w += mass[s]*psi[s]
    bfm.axpy (r, w, src, -1); // r = src - (MMdag+mass[s])*psi[s]
    double rn = bfm.norm (r);
    double cn = bfm.norm (src);
    if (bfm.isBoss () && !me) {
      printf ("bfm::CGNE_prec_MdagM_multi_shift_with_guesses: shift[%d] true residual %le \n",
              s, sqrt (rn / cn));
    }
  }
  if (single) {
    for (int s = 1; s < nshift; s++) {
      bfm.axpy (psi[0], psi[s], psi[0], 1.0);
    }
  }
  bfm.threadedFreeFermion (tmp);
  bfm.threadedFreeFermion (tmp2);
  bfm.threadedFreeFermion (w);
  bfm.threadedFreeFermion (r);
  for (int i = 0; i < nshift; i++) {
    bfm.threadedFreeFermion (y[i]);
  }
  return iter;
}

// Recursive helper: sum over all unique products of 'nprod_remaining' shifts
// drawn from shifts[start..N-1] with indices i and k both excluded.
// Used by sigma_prod_2 to build the multiplied-out polynomial coefficients.
inline double
sigma_sum_recurse_2 (const int &i, const int &k, const int &start,
                     const int &N, double shifts[],
                     const int &nprod_remaining)
{
  double out = 0.0;
  for (int j = start; j < N; j++) {
    if (j == i || j == k)
      continue; //skip i,k
    double toadd = shifts[j];
    if (nprod_remaining > 1) {
      toadd *= sigma_sum_recurse_2 (i, k, j + 1, N, shifts,
                                    nprod_remaining - 1);
    }
    out += toadd;
  }
  return out;
}

// Coefficient of A^n in \prod_{j != i,k} (A + shifts[j]) over N shifts:
// the sum of all unique products of (N-2-n) shifts excluding indices i and k.
// A product of size zero has value 1.0 (the n == N-2 leading term).
inline double
sigma_prod_2 (const int &i, const int &k, const int &n, const int &N,
              double shifts[])
{
  if (n == N - 2)
    return 1.0;
  int prod_size = N - 2 - n;
  return sigma_sum_recurse_2 (i, k, 0, N, shifts, prod_size);
}

//CK: Multi-mass with
// multiple sources, using the method in the Osborn paper cited above
//------------------------------------------------------------------------------
// CGNE_prec_MdagM_multi_shift_multi_src
//
// Multi-shift CG with a *different* source per shift: solves
//   (MdagM + mass[i]) psi[i] = src[i],   i = 0..nshift-1,
// by building shift-dependent correction vectors y[i] and a single common
// source r in a shared Krylov space, running the ordinary multi-shift solver
// on r, and adding the y[i] back onto the solutions.
// If 'single' is nonzero, all solutions are summed into psi[0] at the end.
// Returns the iteration count of the underlying multi-shift solve.
//------------------------------------------------------------------------------
template < typename Float >
  int CGNE_prec_MdagM_multi_shift_multi_src (Fermion_t psi[], Fermion_t src[],
                                             double mass[], double alpha[],
                                             int nshift,
                                             const double mresidual_in[],
                                             int single, bfm_evo < Float > &bfm)
{
  //'single' appears to sum all the solutions into psi[0]
  int me = bfm.thread_barrier ();

  //Not known if mresidual_in is shared or not. To be safe, threads each take a
  //local copy of the residuals that they can all simultaneously modify without
  //worrying about race conditions
  double mresidual[nshift];
  for (int i = 0; i < nshift; ++i)
    mresidual[i] = mresidual_in[i];

  //Here we need to form
  //y_i = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j)/(\sigma_j-\sigma_k) ] (b_i - b_k)/(\sigma_i-\sigma_k)
  //    = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] \prod_{j\neq k} 1/(\sigma_j-\sigma_k) (b_i - b_k)
  //    = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] c_k (b_i - b_k)
  //where c_k = \prod_{j\neq k} 1/(\sigma_j-\sigma_k) is calculated beforehand
  Fermion_t y[nshift];
  Fermion_t Apowm_src[nshift]; // running A^m src[i] products (A = MdagM)
  double c[nshift];
  for (int i = 0; i < nshift; i++) {
    y[i] = bfm.threadedAllocFermion (mem_fast);
    Apowm_src[i] = bfm.threadedAllocFermion (mem_fast);
    bfm.copy (Apowm_src[i], src[i]);
    bfm.set_zero (y[i]);
    //Calculate coefficients c_i
    c[i] = 1.0;
    for (int j = 0; j < nshift; j++) {
      if (j == i)
        continue;
      c[i] *= 1.0 / (mass[j] - mass[i]);
    }
  }

  //I don't think we can avoid doing ~nshift^2 matrix multiplications, but I
  //believe we can avoid storing nshift^2 vectors
  Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
  Fermion_t tmp2 = bfm.threadedAllocFermion (mem_fast);

  //y_i = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] c_k (b_i - b_k)
  //    = \sum_{k\neq i} c_k { [ \prod_{j\neq i,k} (A + \sigma_j) ] b_i - [ \prod_{j\neq i,k} (A + \sigma_j) ] b_k }
  //These objects are the fundamental elements \prod_{j\neq i,k} (A + \sigma_j) ] b_i
  //Multiplying out = A^{N-2} + A^{N-3}\sum_{j\neq i,k} \sigma_j
  //                + A^{N-4} \sum_{j\neq i,k} \sigma_j \sum_{l>j, l\neq i,k} \sigma_l + .....
  for (int m = 0; m < nshift - 1; m++) { //last term MMdag^{nshift-2}
    for (int i = 0; i < nshift; i++) {
      for (int k = 0; k < nshift; k++) { //\sum_{k\neq i}
        if (k == i)
          continue;
        double coeff = c[k] * sigma_prod_2 (i, k, m, nshift, mass);
        bfm.axpy (y[i], Apowm_src[i], y[i], coeff);
        bfm.axpy (y[i], Apowm_src[k], y[i], -coeff);
      }
    }
    // Advance every running product by one power of A = MdagM
    for (int n = 0; n < nshift; n++) {
      bfm.Mprec (Apowm_src[n], tmp, tmp2, DaggerNo);  //tmp = M w
      bfm.Mprec (tmp, Apowm_src[n], tmp2, DaggerYes); //w = Mdag tmp
    }
  }

  Fermion_t r = bfm.threadedAllocFermion (mem_fast);
  //Also need r = src_i - (A+\sigma_i)y_i for any i; use i = nshift-1
  bfm.Mprec (y[nshift - 1], tmp, tmp2, DaggerNo);
  bfm.Mprec (tmp, r, tmp2, DaggerYes);
  bfm.axpy (r, y[nshift - 1], r, mass[nshift - 1]);
  bfm.axpy (r, r, src[nshift - 1], -1.0);

  //run standard multi-shift with r as the source
  //To achieve the desired residual on the solution we need to modify the
  //residuals to reflect the difference in source norm.
  //In the multi-mass solve we are trying to emulate:
  //|resid|^2 = |orig src|^2 * (orig mresidual)^2
  //Keeping this fixed, we want
  //(new mresidual) = sqrt( |orig src|^2 * (orig mresidual)^2 / |new src|^2 )
  Float new_src_norm2 = bfm.norm (r);
  for (int i = 0; i < nshift; i++) {
    Float orig_src_norm2 = bfm.norm (src[i]);
    if (bfm.isBoss () && !me)
      printf ("bfm::CGNE_prec_MdagM_multi_src: input src[%d] norm2 %le\n", i,
              orig_src_norm2);
    mresidual[i] *= sqrt (orig_src_norm2 / new_src_norm2);
  }
  if (bfm.isBoss () && !me)
    printf ("bfm::CGNE_prec_MdagM_multi_src: r norm2 %le\n", new_src_norm2);

  int iter = bfm.CGNE_prec_MdagM_multi_shift (psi, r, mass, alpha, nshift,
                                              mresidual, 0);

  //Add the solutions to the initial guess vectors y
  for (int n = 0; n < nshift; n++)
    bfm.axpy (psi[n], psi[n], y[n], 1.0);

  // Check answers: print the true residual of each shifted system
  if (bfm.isBoss () && (!me))
    printf ("bfm::CGNE_prec_MdagM_multi_src: Checking solutions\n");
  for (int s = 0; s < nshift; s++) {
    bfm.Mprec (psi[s], tmp, tmp2, DaggerNo);
    bfm.Mprec (tmp, Apowm_src[0], tmp2, DaggerYes); //reuse Apowm_src[0], now w=MMdag psi[s]
    bfm.axpy (Apowm_src[0], psi[s], Apowm_src[0], mass[s]); // Apowm_src[0] += mass[s]*psi[s]
    bfm.axpy (r, Apowm_src[0], src[s], -1); // r = src - (MMdag+mass[s])*psi[s]
    double rn = bfm.norm (r);
    double cn = bfm.norm (src[s]);
    if (bfm.isBoss () && !me) {
      printf ("bfm::CGNE_prec_MdagM_multi_src: shift[%d] true residual %le \n",
              s, sqrt (rn / cn));
    }
  }
  if (single) {
    for (int s = 1; s < nshift; s++) {
      bfm.axpy (psi[0], psi[s], psi[0], 1.0);
    }
  }
  bfm.threadedFreeFermion (tmp);
  bfm.threadedFreeFermion (tmp2);
  bfm.threadedFreeFermion (r);
  for (int i = 0; i < nshift; i++) {
    bfm.threadedFreeFermion (Apowm_src[i]);
    bfm.threadedFreeFermion (y[i]);
  }
  return iter;
}

// Apply the shifted normal operator: out = (MdagM + shift) in.
// tmp1/tmp2 are caller-provided scratch fermions (not allocated here).
template < typename Float >
  inline void MdagMplusShift (Fermion_t in, Fermion_t out,
                              const double &shift, Fermion_t tmp1,
                              Fermion_t tmp2, bfm_evo < Float > &bfm)
{
  bfm.Mprec (in, tmp1, tmp2, DaggerNo);
  bfm.Mprec (tmp1, out, tmp2, DaggerYes);
  bfm.axpy (out, in, out, shift);
}

//CK: mixed precision multi-mass using multiple single precision restarted inner loop.
//    Does not work very well because the residuals get very large such that the
//    required stopping conditions are less than single precision accuracy
//
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
// mass, alpha and nshift as usual
// dresidual are the target residuals
// fresidual are the initial single precision residuals, which are dynamically
// modified during the solve
//
// Things to be set before using this function:
//
// double precision solver mass, max iteration
// number.
//
// single precision solver mass, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts will be performed.
//
// Strategy: one single-precision multi-shift solve, then up to max_cycle
// restart cycles in which the double-precision defect of each unfinished
// shift becomes a new single-precision source (solved together via
// CGNE_prec_MdagM_multi_shift_multi_src), and finally a double-precision
// multi-shift polish using the accumulated solutions as guesses.
// Returns the total accumulated iteration count.
inline int
threaded_cg_mixed_restarted_multi_shift_MdagM (Fermion_t sol[], Fermion_t src,
                                               double mass[], double alpha[],
                                               int nshift,
                                               const double dresidual[],
                                               const double fresidual_in[],
                                               int single,
                                               bfm_evo < double >&bfm_d,
                                               bfm_evo < float >&bfm_f,
                                               int max_cycle)
{
  int me = bfm_d.thread_barrier ();

  double fresidual[nshift];
  for (int i = 0; i < nshift; ++i)
    fresidual[i] = fresidual_in[i]; //local thread copy that can be modified freely

  Fermion_t src_d = bfm_d.threadedAllocFermion ();
  Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
  Fermion_t tv2_d = bfm_d.threadedAllocFermion ();

  double src_norm = bfm_d.norm (src);

  //Source and solution locations for single precision input and output
  Fermion_t sol_f[nshift];
  Fermion_t src_f[nshift];
  int finished[nshift];  // per-shift convergence flag
  double stop[nshift];   // per-shift stopping threshold on |r|^2
  for (int n = 0; n < nshift; n++) {
    sol_f[n] = bfm_f.threadedAllocFermion ();
    src_f[n] = bfm_f.threadedAllocFermion ();
    finished[n] = 0;
    stop[n] = src_norm * dresidual[n] * dresidual[n];
  }

  //Do an initial single precision solve with regular multi-mass solver using input residuals
  if (bfm_f.isBoss () && !me)
    printf ("threaded_cg_mixed_restarted_multi_shift_MdagM: Doing initial single precision solve\n");
  {
    threaded_convFermion (src_f[0], src, bfm_f, bfm_d);
    switch_comm (bfm_f, bfm_d);
    bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f[0], mass, alpha, nshift,
                                       fresidual, 0);
    for (int n = 0; n < nshift; n++)
      threaded_convFermion (sol[n], sol_f[n], bfm_d, bfm_f);
    switch_comm (bfm_d, bfm_f);
  }

  //Perform restarted multi-mass until the double prec residual meets the target
  if (bfm_f.isBoss () && !me)
    printf ("threaded_cg_mixed_restarted_multi_shift_MdagM: Starting main iteration loop\n");
  int iter = 0;
  for (int i = 0; i < max_cycle; ++i) {
    // compute double precision rsd and also new RHS vector for each shift
    int fin_count = 0;
    for (int n = 0; n < nshift; n++) {
      if (!finished[n]) {
        MdagMplusShift < double >(sol[n], tv1_d, mass[n], src_d, tv2_d, bfm_d); // tv1_d = (MdagM + mass[n]) * sol[n]
        double norm = bfm_d.axpy_norm (src_d, tv1_d, src, -1.);

        //ad hoc stopping condition from cg_mixed implementation
        if (norm < 100. * stop[n])
          finished[n] = 1;

        if (bfm_f.isBoss () && !me)
          printf ("threaded_cg_mixed_restarted_multi_shift_MdagM: iter = %d shift = %d rsd = %17.10e(d) stop = %17.10e(d) finished = %d\n",
                  i, n, norm, stop[n], finished[n]);

        // Loosen the single-precision target so it stays achievable relative
        // to the (shrinking) defect norm
        while (norm * fresidual[n] * fresidual[n] < stop[n])
          fresidual[n] *= 2;

        threaded_convFermion (src_f[n], src_d, bfm_f, bfm_d);
      }
      fin_count += finished[n];
    }
    if (fin_count == nshift)
      break; //stop when all have finished

    switch_comm (bfm_f, bfm_d);
    iter += CGNE_prec_MdagM_multi_shift_multi_src (sol_f, src_f, mass, alpha,
                                                   nshift, fresidual, 0, bfm_f);
    switch_comm (bfm_d, bfm_f);

    // Accumulate single-precision corrections onto the double-precision solutions
    for (int n = 0; n < nshift; n++) {
      threaded_convFermion (tv1_d, sol_f[n], bfm_d, bfm_f);
      bfm_d.axpy (sol[n], tv1_d, sol[n], 1.);
    }
  }

  bfm_d.threadedFreeFermion (src_d);
  bfm_d.threadedFreeFermion (tv1_d);
  bfm_d.threadedFreeFermion (tv2_d);
  for (int i = 0; i < nshift; i++) {
    bfm_f.threadedFreeFermion (sol_f[i]);
    bfm_f.threadedFreeFermion (src_f[i]);
  }

  // NOTE(review): message below lacks a trailing newline in the original
  if (bfm_f.isBoss () && !me)
    printf ("threaded_cg_mixed_restarted_multi_shift_MdagM: Running double precision multi-mass using single precision version as guess");

  iter += CGNE_prec_MdagM_multi_shift_with_guesses (sol, src, sol, mass, alpha,
                                                    nshift, dresidual, single,
                                                    bfm_d);
  return iter;
}

//CK: mixed precision multi-mass using single precision solve as guess for double precision
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
// mass, alpha and nshift as usual
// dresidual are the target residuals
// fresidual are the initial single precision residuals, which are dynamically modified during the solve
//
// Things to be set before using this function:
//
// double precision solver mass, max iteration
// number.
//
// single precision solver mass, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts will be performed.
//
// Simple two-stage scheme: one single-precision multi-shift solve, then a
// double-precision multi-shift solve seeded with those solutions as guesses.
// Returns the summed iteration count of both stages.
inline int
threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM (Fermion_t sol[],
                                                          Fermion_t src,
                                                          double mass[],
                                                          double alpha[],
                                                          int nshift,
                                                          double dresidual[],
                                                          double fresidual[],
                                                          int single,
                                                          bfm_evo < double >&bfm_d,
                                                          bfm_evo < float >&bfm_f)
{
  int me = bfm_d.thread_barrier ();

  //Source and solution locations for single precision input and output
  Fermion_t sol_f[nshift];
  for (int n = 0; n < nshift; n++)
    sol_f[n] = bfm_f.threadedAllocFermion ();
  Fermion_t src_f = bfm_f.threadedAllocFermion ();

  int iter = 0;

  //Do an initial single precision solve with regular multi-mass solver using input residuals
  if (bfm_f.isBoss () && !me)
    printf ("threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM: Doing single precision solve\n");
  {
    threaded_convFermion (src_f, src, bfm_f, bfm_d);
    switch_comm (bfm_f, bfm_d);
    iter += bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f, mass, alpha,
                                               nshift, fresidual, 0);
    for (int n = 0; n < nshift; n++)
      threaded_convFermion (sol[n], sol_f[n], bfm_d, bfm_f);
    switch_comm (bfm_d, bfm_f);
  }
  bfm_f.threadedFreeFermion (src_f);
  for (int n = 0; n < nshift; n++)
    bfm_f.threadedFreeFermion (sol_f[n]);

  // NOTE(review): message below lacks a trailing newline in the original
  if (bfm_f.isBoss () && !me)
    printf ("threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM: Running double precision multi-mass using single precision version as guess");

  iter += CGNE_prec_MdagM_multi_shift_with_guesses (sol, src, sol, mass, alpha,
                                                    nshift, dresidual, single,
                                                    bfm_d);
  return iter;
}

//CK: "Single Shift Inverter" : Modified version of int bfm::CGNE_prec that solves (MdagM + shift) psi = src
template < typename Float >
  int threaded_CGNE_MdagM_plus_shift (Fermion_t psi, Fermion_t src,
                                      Float shift, bfm_evo < Float > &bfm)
{
  //Standard CG algorithm from BFM:
  //(Use subscript to label iteration)
  //r_1 = MMdag psi - src, p_1 = MMdag psi - src, c_1 = |r_1|^2 = |p_1|^2
  //Iteration:
  //d_k = |M p_k|^2 = p_k^dag M^dag M p_k
  //a_k = c_k / d_k
  //r_k+1 = MMdag p_k - a_k r_k
  //c_k+1 = |r_k+1|^2
  //b_k = c_k+1 / c_k
  //psi = a_k p_k + psi
  //p_k+1 = b_k p_k + r_k+1
  //Note: norm(vec) is actually |vec|^2

  //Shift modified version should look similar:
  //r_1 = (MMdag+shift) psi - src, p_1 = (MMdag+shift) psi - src, c_1 = |r_1|^2 = |p_1|^2
  //Iteration:
  //d_k = p_k^dag M^dag M p_k + shift * p_k^dag p_k
  //a_k = c_k / d_k
  //r_k+1 = (MMdag+shift) p_k - a_k r_k
  //c_k+1 = |r_k+1|^2
  //b_k = c_k+1 / c_k
  //psi = a_k p_k + psi
  //p_k+1 = b_k p_k + r_k+1
  int me = bfm.thread_barrier ();
  int verbose = bfm.verbose;
  double f; // unused in this routine (kept from the BFM original)
  double cp, c, a, d, b;
  double residual = bfm.residual;
  int max_iter = bfm.max_iter;

  if (bfm.isBoss () && (!me)) {
    bfm.InverterEnter ();
  }

  Fermion_t p = bfm.threadedAllocFermion (mem_fast);
  Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
  Fermion_t mp = bfm.threadedAllocFermion (mem_fast);
  Fermion_t mmp = bfm.threadedAllocFermion (mem_fast);
  Fermion_t r = bfm.threadedAllocFermion (mem_fast);

  //Initial residual computation & set up
  double guess = bfm.norm (psi);
  d = bfm.Mprec (psi, mp, tmp, DaggerNo);
  bfm.Mprec (mp, mmp, tmp, DaggerYes);
  b = bfm.axpy_norm (mmp, psi, mmp, shift); //MMdag psi + shift*psi
  cp = bfm.axpy_norm (r, mmp, src, -1.0);
  a = bfm.axpy_norm (p, mmp, src, -1.0);
  //a = bfm.norm(p);
  //cp= bfm.norm(r);
  //r_1 = (MMdag+shift) psi - src, p_1 = (MMdag+shift) psi - src, c_1 = |r_1|^2 = |p_1|^2
  Float ssq = bfm.norm (src);
  if (verbose && bfm.isBoss () && !me) {
    printf ("mixed_cg::CGNE_MdagM_plus_shift gues %le \n", guess);
    printf ("mixed_cg::CGNE_MdagM_plus_shift src %le \n", ssq);
    printf ("mixed_cg::CGNE_MdagM_plus_shift Mp %le \n", d);
    printf ("mixed_cg::CGNE_MdagM_plus_shift (MMdag + shift)p %le \n", b);
    printf ("mixed_cg::CGNE_MdagM_plus_shift r %le \n", cp);
    printf ("mixed_cg::CGNE_MdagM_plus_shift p %le \n", a);
  }
  Float rsq = residual * residual * ssq;

  //Check if guess is really REALLY good :)
  if (cp <= rsq) {
    if (verbose && bfm.isBoss () && !me) {
      printf ("mixed_cg::CGNE_MdagM_plus_shift k=0 converged - suspiciously nice guess %le %le\n",
              cp, rsq);
    }
    bfm.threadedFreeFermion (tmp);
    bfm.threadedFreeFermion (p);
    bfm.threadedFreeFermion (mp);
    bfm.threadedFreeFermion (mmp);
    bfm.threadedFreeFermion (r);
    if (bfm.isBoss () && (!me)) {
      bfm.InverterExit ();
    }
    return 0;
  }

  if (verbose && bfm.isBoss () && !me)
    printf ("mixed_cg::CGNE_MdagM_plus_shift k=0 residual %le rsq %le\n", cp,
            rsq);
  if (bfm.isBoss () && !me) {
    if (bfm.watchfile) {
      printf ("mixed_cg::CGNE_MdagM_plus_shift watching file \"%s\"\n",
              bfm.watchfile);
    }
  }

  struct timeval start, stop;
  if (bfm.isBoss () && !me)
    gettimeofday (&start, NULL);

  for (int k = 1; k <= max_iter; k++) {
    bfm.iter = k;
    uint64_t t_iter_1 = GetTimeBase ();

    c = cp;
    uint64_t t_mprec_1 = GetTimeBase ();
    //d_k = p_k^dag M^dag M p_k + shift * p_k^dag p_k
    d = bfm.Mprec (p, mp, tmp, 0, 1);
    double norm_p = bfm.norm (p);
    d += shift * norm_p;
    uint64_t t_mprec_2 = GetTimeBase ();

    a = c / d;

    uint64_t t_mprec_3 = GetTimeBase ();
    bfm.Mprec (mp, mmp, tmp, 1);
    bfm.axpy (mmp, p, mmp, shift); // mmp = MMdag p + shift * p
    uint64_t t_mprec_4 = GetTimeBase ();

    uint64_t tr1 = GetTimeBase ();
    cp = bfm.axpy_norm (r, mmp, r, -a); //r_k+1 = r_k - a_k (MMdag+shift) p_k
    b = cp / c;
    uint64_t tr2 = GetTimeBase ();

    uint64_t tpsi1 = GetTimeBase ();
    bfm.axpy (psi, p, psi, a);
    uint64_t tpsi2 = GetTimeBase ();

    // New (conjugate/M-orthogonal) search direction
    uint64_t tp1 = GetTimeBase ();
    bfm.axpy (p, p, r, b);
    uint64_t tp2 = GetTimeBase ();
    uint64_t t_iter_2 = GetTimeBase ();

    // verbose nonsense: one-shot per-iteration timing breakdown
    if ((bfm.iter == bfm.time_report_iter) && bfm.isBoss () && (!me)
        && verbose) {
      int lx = bfm.node_latt[0];
      int ly = bfm.node_latt[1];
      int lz = bfm.node_latt[2];
      int lt = bfm.node_latt[3];
      int cb4dsites = (lx * ly * lz * lt) / 2;
      printf ("fermionCacheFootprint: %ld \n", 7 * bfm.axpyBytes () / 3);
      printf ("gauge CacheFootprint: %ld \n", 2 * 18 * 8 * cb4dsites * 2);
      printf ("fermionVecBytes : %ld \n", bfm.axpyBytes () / 3);
      printf ("axpyBytes : %ld \n", bfm.axpyBytes ());
      printf ("axpy (soln) : %ld cyc %le MB/s\n", (tpsi2 - tpsi1),
              (double) bfm.axpyBytes () * 1600. / (tpsi2 - tpsi1));
      printf ("axpy_norm (residual) : %ld cyc %le MB/s\n", (tr2 - tr1),
              (double) bfm.axpyBytes () * 1600. / (tr2 - tr1));
      printf ("axpy (search) : %ld cyc %le MB/s\n", (tp2 - tp1),
              (double) bfm.axpyBytes () * 1600. / (tp2 - tp1));
      printf ("Iter time : %ld cyc\n", t_iter_2 - t_iter_1);
      printf ("linalg time : %ld cyc\n",
              t_iter_2 - t_iter_1 - (t_mprec_2 - t_mprec_1) - (t_mprec_4 -
                                                               t_mprec_3));
      printf ("Mprec time : %ld cyc\n", t_mprec_2 - t_mprec_1);
      printf ("Mprec time : %ld cyc\n", t_mprec_4 - t_mprec_3);
      fflush (stdout);
    }

    if (((k % 100 == 0) && (verbose != 0)) || (verbose > 10)) {
      if (bfm.isBoss () && !me) {
        printf ("mixed_cg::CGNE_MdagM_plus_shift: k=%d r^2=%le %le %lx\n", k,
                cp, sqrt (cp / ssq), &bfm);
      }
    }

    // Stopping condition
    if (cp <= rsq) {
      //I did not update the flops count so I have commented them out
      struct timeval diff;
      if (bfm.isBoss () && !me) {
        gettimeofday (&stop, NULL);
        timersub (&stop, &start, &diff);
      }
      if (bfm.isBoss () && !me)
        printf ("mixed_cg::CGNE_MdagM_plus_shift converged in %d iterations\n",
                k);
      // NOTE(review): %d with time_t/suseconds_t is a printf format mismatch
      // on LP64 platforms — confirm against the target ABI
      if (bfm.isBoss () && !me)
        printf ("mixed_cg::CGNE_MdagM_plus_shift converged in %d.%6.6d s\n",
                diff.tv_sec, diff.tv_usec);
      //double flops = mprecFlops()*2.0 + 2.0*axpyNormFlops() + axpyFlops()*2.0;
      //flops = flops * k;
      //double t = diff.tv_sec*1.0E6 + diff.tv_usec;
      // if ( isBoss()&& !me ) // printf("mixed_cg::CGNE_MdagM_plus_shift: %d mprec flops/site\n",mprecFlopsPerSite());
      // if ( isBoss()&& !me ) printf("mixed_cg::CGNE_MdagM_plus_shift: %le flops\n",flops);
      // if ( isBoss()&& !me ) printf("mixed_cg::CGNE_MdagM_plus_shift: %le mflops per node\n",flops/t);

      if (bfm.isBoss () && !me) {
        printf ("mixed_cg::CGNE_MdagM_plus_shift calculating true resid. V0\n");
        fflush (stdout);
      } //DEBUG

      // Recompute the true residual of the shifted system
      bfm.Mprec (psi, mp, tmp, 0);
      bfm.Mprec (mp, mmp, tmp, 1);
      bfm.axpy (mmp, psi, mmp, shift);
      double resid = bfm.axpy_norm (tmp, src, mmp, -1.0);
      double src_norm = bfm.norm (src);
      double true_residual = sqrt (resid / src_norm);
      if (bfm.isBoss () && !me)
        printf ("mixed_cg::CGNE_MdagM_plus_shift: true residual is %le \n",
                true_residual);

      if (bfm.isBoss () && !me) {
        printf ("mixed_cg::CGNE_MdagM_plus_shift cleaning up\n");
        fflush (stdout);
      } //DEBUG

      bfm.threadedFreeFermion (tmp);
      bfm.threadedFreeFermion (p);
      bfm.threadedFreeFermion (mp);
      bfm.threadedFreeFermion (mmp);
      bfm.threadedFreeFermion (r);
#ifdef LIST_ENGINE
      if (bfm.list_engine)
        bfm.L1P_PatternUnconfigure ();
#endif
      if (bfm.isBoss () && (!me)) {
        bfm.InverterExit ();
      }
      return k;
    }
  }

  // Iteration budget exhausted without meeting the stopping condition
  if (bfm.isBoss () && !me)
    printf ("mixed_cg::CGNE_MdagM_plus_shift: CG not converged \n");
  bfm.threadedFreeFermion (tmp);
  bfm.threadedFreeFermion (p);
  bfm.threadedFreeFermion (mp);
  bfm.threadedFreeFermion (mmp);
  bfm.threadedFreeFermion (r);
#ifdef LIST_ENGINE
  if (bfm.list_engine)
    bfm.L1P_PatternUnconfigure ();
#endif
  if (bfm.isBoss () && (!me)) {
    bfm.InverterExit ();
  }
  return -1;
}

//CK: Single precision solve followed by defect correction loop using single shift solver independently
// for each shift
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
//
// Things to be set before using this function:
//
// double precision solver mass, stopping condition, max iteration
// number.
//
// single precision solver mass, stopping condition, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts will be performed.
//fresidual are the residuals used for the initial single precision solve
//min_fp_resid: the smallest value for the residual of the initial single-precision multi-mass solve
//
// Strategy: one single-precision multi-shift solve, then an independent
// single-precision defect-correction loop per shift (via the single-shift
// inverter), and finally a double-precision single-shift polish per shift
// using the corrected solution as the guess. Returns the total iteration
// count accumulated over all single-shift solves.
inline int
threaded_cg_mixed_defect_correction_multi_shift_MdagM (Fermion_t sol[],
                                                       Fermion_t src,
                                                       double mass[],
                                                       double alpha[],
                                                       bfm_evo < double >&bfm_d,
                                                       bfm_evo < float >&bfm_f,
                                                       int nshift,
                                                       double mresidual[],
                                                       double fresidual[],
                                                       int single,
                                                       int max_cycle)
{
  int me = bfm_d.thread_barrier ();
  double frsd = bfm_f.residual; //save original residual for later restoration

  //First we perform the multi-mass inversion using the single-precision solver
  Fermion_t src_f = bfm_f.threadedAllocFermion ();
  Fermion_t sol_f[nshift];
  for (int i = 0; i < nshift; i++)
    sol_f[i] = bfm_f.threadedAllocFermion ();

  threaded_convFermion (src_f, src, bfm_f, bfm_d);
  switch_comm (bfm_f, bfm_d);
  int single_prec_iter =
    bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f, mass, alpha, nshift,
                                       fresidual, 0);
  if (bfm_f.isBoss () && !me) {
    printf ("threaded_cg_mixed_defect_correction_multi_shift_MdagM: single-prec multi-shift iter = %d\n",
            single_prec_iter);
  }

  //Now we loop through the shifted solutions and do defect-correction on each individually
  switch_comm (bfm_d, bfm_f);
  for (int i = 0; i < nshift; i++)
    threaded_convFermion (sol[i], sol_f[i], bfm_d, bfm_f);

  double src_norm = bfm_d.norm (src);

  Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
  Fermion_t tv2_d = bfm_d.threadedAllocFermion ();
  Fermion_t src_d = bfm_d.threadedAllocFermion ();

  int iter = 0;
  for (int shift = 0; shift < nshift; shift++) {
    double stop = src_norm * mresidual[shift] * mresidual[shift];

    bfm_f.thread_barrier (); //make sure no threads have yet to write to bfm_f.residual from previous loop cycle
    bfm_f.residual = mresidual[shift];

    for (int i = 0; i < max_cycle; ++i) {
      // compute double precision rsd and also new RHS vector.
      bfm_d.Mprec (sol[shift], tv1_d, src_d, 0, 0); //here src_d is just used as a temp storage
      bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0); // tv2_d = MdagM * sol
      bfm_d.axpy (tv2_d, sol[shift], tv2_d, mass[shift]); //tv2_d = (MdagM + shift)* sol
      double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);

      // Hantao's ad hoc stopping condition
      if (norm < 100. * stop)
        break;

      // Single writer inflates the single-precision target; next call's entry
      // barrier publishes the value to the other threads
      if (!me)
        while (norm * bfm_f.residual * bfm_f.residual < stop)
          bfm_f.residual *= 2;
      //bfm_f.thread_barrier(); //Not needed because there is a barrier at start of next call

      if (bfm_f.isBoss () && !me) {
        printf ("threaded_cg_mixed_defect_correction_multi_shift_MdagM: shift = %d, defect correction cycle = %d rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d), next single prec target resid %17.10e]\n",
                shift, i, norm, stop, sqrt (norm / src_norm), bfm_f.residual);
      }

      //We need to invert MdagM + shift, for which we cannot use the regular inverter. Use my optimised single-shift inverter
      //Could also use the multi-shift with a single shift, but we can avoid some overhead by using my optimised version
      threaded_convFermion (src_f, src_d, bfm_f, bfm_d);
      switch_comm (bfm_f, bfm_d);
      bfm_f.set_zero (sol_f[shift]);
      iter += threaded_CGNE_MdagM_plus_shift < float >(sol_f[shift], src_f,
                                                       mass[shift], bfm_f);
      switch_comm (bfm_d, bfm_f);
      threaded_convFermion (tv1_d, sol_f[shift], bfm_d, bfm_f);
      bfm_d.axpy (sol[shift], tv1_d, sol[shift], 1.);
    }
    bfm_f.residual = frsd; //restore original single precision residual at end of each step
  }

  bfm_d.threadedFreeFermion (src_d);
  bfm_d.threadedFreeFermion (tv1_d);
  bfm_d.threadedFreeFermion (tv2_d);
  for (int i = 0; i < nshift; i++)
    bfm_f.threadedFreeFermion (sol_f[i]);
  bfm_f.threadedFreeFermion (src_f);

  // Double-precision polish: one single-shift solve per shift with the
  // defect-corrected solution as the starting guess
  for (int shift = 0; shift < nshift; shift++) {
    if (bfm_d.isBoss () && !me)
      printf ("threaded_cg_mixed_defect_correction_multi_shift_MdagM: doing final inversion for shift %d using corrected solution as guess\n",
              shift);
    double restore_resid = bfm_d.residual;
    bfm_d.thread_barrier (); //make sure all threads get the same value before we change it
    bfm_d.residual = mresidual[shift];
    iter += threaded_CGNE_MdagM_plus_shift < double >(sol[shift], src,
                                                      mass[shift], bfm_d);
    bfm_d.residual = restore_resid;
    //bfm_d.thread_barrier(); //Not needed because barrier in next call
    double sol_norm = bfm_d.norm (sol[shift]);
    if (bfm_d.isBoss () && !me)
      printf ("threaded_cg_mixed_defect_correction_multi_shift_MdagM: final sol[%d] norm = %17.10e\n",
              shift, sol_norm);
  }

  if (single) {
    for (int s = 1; s < nshift; s++) {
      bfm_d.axpy (sol[0], sol[s], sol[0], 1.0);
    }
  }
  return iter;
}

//CK 2014: The version below performs the multi-shift with the matrix multiplication in single precision.
//The residual is stored in single precision, but the search directions and solution are
//stored in double precision. Every update_freq iterations the residual is corrected in double precision.
//Note that the final double precision residuals may not be as good as desired, so you may want to perform defect correction on each pole afterwards. I have added a version that does this extra step below.
//-----------------------------------------------------------------------------
// Multi-shift CGNE on (Mpc^dag Mpc + mass[s]) with the matrix multiplication
// performed in single precision (bfm_f). The residual r is kept in single
// precision; the search directions ps_d[] and the solutions psi[] are
// accumulated in double precision (bfm_d). Every update_freq iterations the
// residual is recomputed in full double precision from the lightest-shift
// solution psi[0] ("reliable update").
//
// Returns the iteration count k on convergence, or -1 if bfm_f.max_iter is
// exhausted. Must be called from inside a thread team (uses thread_barrier).
// On entry double-precision comms are assumed active; they are switched to
// single precision for the iteration and restored before returning.
//-----------------------------------------------------------------------------
inline int
threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp (Fermion_t psi[], Fermion_t src,
                                                 double mass[], double alpha[],
                                                 int nshift, double mresidual[],
                                                 int single,
                                                 bfm_evo < float >&bfm_f,
                                                 bfm_evo < double >&bfm_d,
                                                 int update_freq = 100,
                                                 int report_freq = -1)
{
  //NOTE: Assumes bfm_d comms are active
  //update_freq is the frequency at which the reliable update step is performed
  //report_freq prints the double precision true residual when k % report_freq = 0. Use -1 to disable
  int me = bfm_d.thread_barrier ();

  double bs[nshift];
  double rsq[nshift];
  double z[nshift][2];
  int converged[nshift];
  const int primary = 0;

  //Primary shift fields CG iteration
  double a, b, c, d;
  double cp, bp;                //prev

  //Single precision fields
  Fermion_t r = bfm_f.threadedAllocFermion (mem_slow);  //residual vector, single precision
  Fermion_t tmp = bfm_f.threadedAllocFermion (mem_fast);
  Fermion_t p = bfm_f.threadedAllocFermion (mem_fast);
  Fermion_t mp = bfm_f.threadedAllocFermion (mem_fast);
  Fermion_t mmp = bfm_f.threadedAllocFermion (mem_fast);
  Fermion_t src_f = bfm_f.threadedAllocFermion (mem_slow);
  mixed_cg::threaded_convFermion_fast (src_f, src, bfm_f, bfm_d);

  //Double precision fields
  Fermion_t p_d = bfm_d.threadedAllocFermion (mem_fast);  //search direction, double precision
  Fermion_t tmp_d = bfm_d.threadedAllocFermion (mem_fast);
  Fermion_t mp_d = bfm_d.threadedAllocFermion (mem_fast);
  Fermion_t mmp_d = bfm_d.threadedAllocFermion (mem_fast);

  Fermion_t ps_d[nshift];       // search directions (double precision)
  for (int i = 0; i < nshift; i++) {
    ps_d[i] = bfm_d.threadedAllocFermion (mem_slow);
    converged[i] = 0;
  }

  // Frees every field allocated above; expanded on all exit paths.
#define DEALLOCATE_ALL \
  bfm_f.threadedFreeFermion(r); \
  bfm_f.threadedFreeFermion(tmp); \
  bfm_f.threadedFreeFermion(p); \
  bfm_f.threadedFreeFermion(mp); \
  bfm_f.threadedFreeFermion(mmp); \
  bfm_f.threadedFreeFermion(src_f); \
  bfm_d.threadedFreeFermion(p_d); \
  bfm_d.threadedFreeFermion(tmp_d); \
  bfm_d.threadedFreeFermion(mp_d); \
  bfm_d.threadedFreeFermion(mmp_d); \
  for(int s=0;s<nshift;s++) bfm_d.threadedFreeFermion(ps_d[s])

  // Check lightest mass
  for (int s = 0; s < nshift; s++) {
    if (mass[s] < mass[primary]) {
      printf ("First shift not lightest - oops\n");
      exit (-1);
    }
  }

  cp = bfm_d.norm (src);
  for (int s = 0; s < nshift; s++) {
    rsq[s] = cp * mresidual[s] * mresidual[s];  // per-shift stopping target |r|^2
    bfm_d.copy (ps_d[s], src);
  }

  // r and p for primary
  bfm_f.copy (r, src_f);        //residual vector in single prec
  bfm_d.copy (p_d, src);
  double rn = cp;               //norm of src = p_d

  mixed_cg::switch_comm (bfm_f, bfm_d);

  mixed_cg::threaded_convFermion_fast (p, p_d, bfm_f, bfm_d);
  d = bfm_f.Mprec (p, mp, tmp, DaggerNo, 1);  //mp = Mpc p, what is the 'norm' of?? I think its |Mpc p|^2
  bfm_f.Mprec (mp, mmp, tmp, DaggerYes);      //mmp = Mpc^dag mp = Mpc^dag Mpc p

  bfm_f.axpy (mmp, p, mmp, mass[0]);  //mmp = p*mass[0]+mmp
  d += rn * mass[0];

  b = -cp / d;
  if (bfm_f.isBoss () && !me)
    printf ("bfmbase::CGNE_prec_multi: b = -cp/d = -%le/%le = %le\n", cp, d, b);

  // Set up the various shift variables
  int iz = 0;
  z[0][1 - iz] = 1.0;
  z[0][iz] = 1.0;
  bs[0] = b;
  for (int s = 1; s < nshift; s++) {
    z[s][1 - iz] = 1.0;
    z[s][iz] = 1.0 / (1.0 - b * (mass[s] - mass[0]));
    bs[s] = b * z[s][iz];       // Sign relative to Mike - FIXME
  }

  c = bfm_f.axpy_norm (r, mmp, r, b);

  if (bfm_f.isBoss () && !me)
    printf ("bfmbase::CGNE_prec_multi: k=0 residual %le \n", c);

  for (int s = 0; s < nshift; s++) {
    bfm_d.axpby (psi[s], src, src, 0., -bs[s] * alpha[s]);  //initialize double prec solutions
  }

  // Iteration loop
  for (int k = 1; k <= bfm_f.max_iter; k++) {
    a = c / cp;

#define CK_BAGEL_OPTIMISE
#ifndef CK_BAGEL_OPTIMISE
    mixed_cg::threaded_convFermion_fast (tmp_d, r, bfm_d, bfm_f);  //store double prec copy of r in tmp_d
    bfm_d.axpy (p_d, p_d, tmp_d, a);
    for (int s = 0; s < nshift; s++) {
      if (!converged[s]) {
        if (s == 0) {
          bfm_d.axpy (ps_d[s], ps_d[s], tmp_d, a);
        } else {
          double as = a * z[s][iz] * bs[s] / (z[s][1 - iz] * b);
          bfm_d.axpby (ps_d[s], tmp_d, ps_d[s], z[s][iz], as);  //ps_d[s] = z[s][iz]*tmp_d + as*ps_d[s]
        }
      }
    }
#else
    //Note, I moved the update of the search vectors to further down so it can potentially be combined with the solution vector update
    double as_uc[nshift + 1], z_uc[nshift + 1];
    Fermion_t ps_d_unconv[nshift + 1];
    int nunconv = 0;
    for (int s = 0; s < nshift; s++)
      if (!converged[s]) {
        ps_d_unconv[nunconv] = ps_d[s];
        z_uc[nunconv] = z[s][iz];
        if (s == 0)
          as_uc[nunconv] = a;
        else
          as_uc[nunconv] = a * z[s][iz] * bs[s] / (z[s][1 - iz] * b);
        ++nunconv;
      }

//# ifdef USE_NEW_BFM_GPARITY
#if 1
    bfm_d.axpy_sy (p_d, p_d, r, a);
#else
    bfm_d.axpy (p_d, p_d, r, a, 1);
#endif
#endif

    cp = c;

    mixed_cg::threaded_convFermion_fast (p, p_d, bfm_f, bfm_d);
    d = bfm_f.Mprec (p, mp, tmp, DaggerNo, 1);
    bfm_f.Mprec (mp, mmp, tmp, DaggerYes);

    bfm_f.axpy (mmp, p, mmp, mass[0]);
    double rn = bfm_f.norm (p);  // NOTE(review): shadows the outer rn above; confirm intentional
    d += rn * mass[0];

    bp = b;
    b = -cp / d;

    // Toggle the recurrence history
    bs[0] = b;
    iz = 1 - iz;
    for (int s = 1; s < nshift; s++) {
      if (!converged[s]) {
        double z0 = z[s][1 - iz];
        double z1 = z[s][iz];
        z[s][iz] = z0 * z1 * bp / (b * a * (z1 - z0) + z1 * bp * (1 - (mass[s] - mass[0]) * b));
        bs[s] = b * z[s][iz] / z0;  // NB sign rel to Mike
      }
    }

#define CK_BAGEL_OPTIMISE_COMBINE_PSI_PS
#ifndef CK_BAGEL_OPTIMISE_COMBINE_PSI_PS
# ifdef CK_BAGEL_OPTIMISE
    //Update the search vectors here rather than above
    bfm_d.axpby_multi_reusey (ps_d_unconv, ps_d_unconv, r, as_uc, z_uc, nunconv, 1);
# endif

    for (int s = 0; s < nshift; s++) {
      int ss = s;
      if (!converged[s])
        bfm_d.axpy (psi[ss], ps_d[s], psi[ss], -bs[s] * alpha[s]);
    }
#else //CK_BAGEL_OPTIMISE_COMBINE_PSI_PS, combine the above steps
    double c_uc[nunconv];
    Fermion_t psi_d_unconv[nunconv];
    int off = 0;
    for (int s = 0; s < nshift; s++)
      if (!converged[s]) {
        c_uc[off] = -bs[s] * alpha[s];
        psi_d_unconv[off++] = psi[s];
      }
    bfm_d.cgmulti_update_srch_sol (psi_d_unconv, ps_d_unconv, r, as_uc, z_uc, c_uc, nunconv, 1);
#endif

    //Reliable update
    if (k % update_freq == 0) {
      double c_sp = bfm_f.axpy_norm (r, mmp, r, b);  //Replace r with true residual
      mixed_cg::switch_comm (bfm_d, bfm_f);
      bfm_d.Mprec (psi[0], mp_d, tmp_d, 0, 1);
      bfm_d.Mprec (mp_d, mmp_d, tmp_d, 1);
      bfm_d.axpy (mmp_d, psi[0], mmp_d, mass[0]);
      c = bfm_d.axpy_norm (tmp_d, mmp_d, src, -1.0);
      if (bfm_d.isBoss () && !me)
        printf ("bfmbase::CGNE_prec_multi: reliable update iter %d, replaced |r|^2 = %.12le with |r|^2 = %.12le\n", k, c_sp, c);
      mixed_cg::threaded_convFermion_fast (r, tmp_d, bfm_f, bfm_d);
      mixed_cg::switch_comm (bfm_f, bfm_d);
    } else {
      c = bfm_f.axpy_norm (r, mmp, r, b);
    }

    // Convergence checks
    int all_converged = 1;
    if (((k % 100) == 0) && bfm_f.isBoss () && (!me))
      printf ("bfmbase::CGNE_prec_multi: k=%d c=%g, shift in current dir for lightest pole %.12e\n", k, c, -bs[0] * alpha[0]);

    for (int s = 0; s < nshift; s++) {
      if (!converged[s]) {
        double css = c * z[s][iz] * z[s][iz];
        if (css < rsq[s])
          converged[s] = 1;
        else
          all_converged = 0;
        if (bfm_f.isBoss () && (!me) && converged[s])
          // NOTE(review): no sqrt here, unlike the printout below -- the
          // "Stated true resid" value looks inconsistent; confirm intended.
          printf ("bfmbase::CGNE_prec_multi: Shift %d converged on iter %d: test cur %g, targ %g [Stated true resid %g].\n", s, k, css, rsq[s], (css / rsq[s]) * mresidual[s]);
        else if (((k % 100) == 0) && bfm_f.isBoss () && (!me))
          printf ("bfmbase::CGNE_prec_multi: Shift %d convergence test cur %g, targ %g [Stated true resid %g].\n", s, css, rsq[s], sqrt (css / rsq[s]) * mresidual[s]);
      }
    }

    if (converged[0] && !all_converged) {
      if (bfm_f.isBoss () && !me)
        printf ("bfmbase::CGNE_prec_multi: WARNING, shift[0] has converged but not all higher mass poles have. Algorithm ending here!\n");
      all_converged = 1;
    }

    if (all_converged) {
      if (bfm_f.isBoss () && (!me))
        printf ("bfmbase::CGNE_prec_multi: k=%d All shifts have converged\n", k);
      if (bfm_f.isBoss () && (!me))
        printf ("bfmbase::CGNE_prec_multi: k=%d Checking solutions\n", k);
      // Check answers
      mixed_cg::switch_comm (bfm_d, bfm_f);
      for (int s = 0; s < nshift; s++) {
        //Convert solution to double precision
        bfm_d.Mprec (psi[s], mp_d, tmp_d, DaggerNo);
        bfm_d.Mprec (mp_d, mmp_d, tmp_d, DaggerYes);
        bfm_d.axpy (tmp_d, psi[s], mmp_d, mass[s]);
        bfm_d.axpy (mp_d, tmp_d, src, -1);
        double rn = bfm_d.norm (mp_d);
        double cn = bfm_d.norm (src);
        if (bfm_d.isBoss () && !me) {
          printf ("double prec final: shift[%d] true residual %.12le \n", s, sqrt (rn / cn));
        }
      }
      if (single) {
        // Caller asked for the summed solution in psi[0]
        for (int s = 1; s < nshift; s++) {
          bfm_d.axpy (psi[0], psi[s], psi[0], 1.0);
        }
      }
      DEALLOCATE_ALL;
      return k;
    } else if (report_freq != -1 && k % report_freq == 0) {
      // Periodic true-residual report in double precision (diagnostics only)
      mixed_cg::switch_comm (bfm_d, bfm_f);
      for (int s = 0; s < nshift; s++) {
        double css = c * z[s][iz] * z[s][iz];
        bfm_d.Mprec (psi[s], mp_d, tmp_d, DaggerNo);
        bfm_d.Mprec (mp_d, mmp_d, tmp_d, DaggerYes);
        bfm_d.axpy (tmp_d, psi[s], mmp_d, mass[s]);
        bfm_d.axpy (mp_d, tmp_d, src, -1);
        double rn = bfm_d.norm (mp_d);
        double cn = bfm_d.norm (src);
        if (bfm_d.isBoss () && !me) {
          printf ("iter %d, double prec: shift[%d] true residual %.12le, running true residual %.12le [converged = %d]\n", k, s, sqrt (rn / cn), sqrt (css / rsq[s]) * mresidual[s], converged[s]);
        }
      }
      mixed_cg::switch_comm (bfm_f, bfm_d);
    }
  }

  mixed_cg::switch_comm (bfm_d, bfm_f);
  if (bfm_d.isBoss () && !me)
    printf ("bfmbase::CGNE_prec_multi: CG not converged \n");
  DEALLOCATE_ALL;
  return -1;
}
#undef DEALLOCATE_ALL

//This version has the following steps:
//1) Single precision multi-mass solve with reliable update and double precision shift vectors
//2) Single precision restarted CG with defect correction loop over poles
//3) Double precision restarted CG with defect correction loop over poles
inline int
//-----------------------------------------------------------------------------
// (return type "inline int" is on the preceding line)
// Driver combining the reliable-update multi-shift solve above with a per-pole
// defect-correction loop and a final per-pole double-precision solve.
// Returns the accumulated iteration count over all stages; aborts via
// cps::ERR.General if a final per-pole inversion fails to converge.
// Timing of each stage is printed by the boss thread.
//-----------------------------------------------------------------------------
threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction (Fermion_t psi[], Fermion_t src,
                                                                   double mass[], double alpha[],
                                                                   int nshift, double mresidual[],
                                                                   int single,
                                                                   bfm_evo < float >&bfm_f,
                                                                   bfm_evo < double >&bfm_d,
                                                                   int update_freq = 100,
                                                                   int report_freq = -1,
                                                                   int max_cycle = 10)
{
  int me = bfm_d.thread_barrier ();
  double frsd = bfm_f.residual;  //save original residual for later restoration

  struct timeval tstart, tstop, tdiff;
  gettimeofday (&tstart, NULL);

  // Stage 1: single precision multi-shift with reliable update (single=0 so
  // the per-pole solutions are kept separate for the correction stages)
  int iter_multi = threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp (psi, src, mass, alpha, nshift, mresidual, 0, bfm_f, bfm_d, update_freq, report_freq);

  gettimeofday (&tstop, NULL);
  timersub (&tstop, &tstart, &tdiff);
  if (bfm_d.isBoss () && !me)
    // NOTE(review): %d used for tv_sec/tv_usec (time_t/suseconds_t) -- works on
    // common ILP32/LP64 ABIs but is not strictly portable; confirm acceptable.
    printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: Initial multi-shift iter = %d, time %d.%6.6d s\n", iter_multi, tdiff.tv_sec, tdiff.tv_usec);

  gettimeofday (&tstart, NULL);

  Fermion_t src_f = bfm_f.threadedAllocFermion ();
  Fermion_t sol_f = bfm_f.threadedAllocFermion ();
  Fermion_t src_d = bfm_d.threadedAllocFermion ();
  Fermion_t tv1_d = bfm_d.threadedAllocFermion (mem_fast);
  Fermion_t tv2_d = bfm_d.threadedAllocFermion (mem_fast);

  double src_norm = bfm_d.norm (src);
  int iter = 0;

  // Stage 2: single precision restarted defect correction, pole by pole
  for (int shift = 0; shift < nshift; shift++) {
    double stop = src_norm * mresidual[shift] * mresidual[shift];
    bfm_f.thread_barrier ();  //ensure all thread writes to bfm_f.residual from previous iteration have completed
    bfm_f.residual = mresidual[shift];
    for (int i = 0; i < max_cycle; ++i) {
      // compute double precision rsd and also new RHS vector.
      bfm_d.Mprec (psi[shift], tv1_d, src_d, 0, 0);  //here src_d is just used as a temp storage
      bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0);       // tv2_d = MdagM * sol
      bfm_d.axpy (tv2_d, psi[shift], tv2_d, mass[shift]);  //tv2_d = (MdagM + shift)* sol
      double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);

      // Hantao's ad hoc stopping condition
      if (norm < 100. * stop) {
        if (bfm_f.isBoss () && !me) {
          printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: shift = %d needs no correction: rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d)]\n", shift, norm, stop, sqrt (norm / src_norm));
        }
        break;
      }

      // Loosen the single precision target so the inner solve stays feasible
      if (!me)
        while (norm * bfm_f.residual * bfm_f.residual < stop)
          bfm_f.residual *= 2;
      //bfm_f.thread_barrier(); //No need, next call has a barrier

      threaded_convFermion_fast (src_f, src_d, bfm_f, bfm_d);
      switch_comm (bfm_f, bfm_d);

      if (bfm_f.isBoss () && !me) {
        printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: shift = %d, defect correction cycle = %d rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d), next single prec target resid %17.10e]\n", shift, i, norm, stop, sqrt (norm / src_norm), bfm_f.residual);
      }

      bfm_f.set_zero (sol_f);
      iter += threaded_CGNE_MdagM_plus_shift < float >(sol_f, src_f, mass[shift], bfm_f);
      switch_comm (bfm_d, bfm_f);
      threaded_convFermion_fast (tv1_d, sol_f, bfm_d, bfm_f);
      bfm_d.axpy (psi[shift], tv1_d, psi[shift], 1.);  // accumulate the correction
    }
    bfm_f.residual = frsd;  //restore original single precision residual at end of each step
  }

  gettimeofday (&tstop, NULL);
  timersub (&tstop, &tstart, &tdiff);
  if (bfm_d.isBoss () && !me)
    printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: defect correction time %d.%6.6d s\n", tdiff.tv_sec, tdiff.tv_usec);
  gettimeofday (&tstart, NULL);

  // Stage 3: final double precision solve per pole, seeded with the corrected solution
  for (int shift = 0; shift < nshift; shift++) {
    if (bfm_d.isBoss () && !me) {
      printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: doing final inversion for shift %d using corrected solution as guess\n", shift);
      fflush (stdout);
    }
    bfm_d.thread_barrier ();  //ensure writes to bfm_d.residual from previous iteration have completed
    double restore_resid = bfm_d.residual;
    bfm_d.thread_barrier ();  //ensure all threads have the same value
    bfm_d.residual = mresidual[shift];
    int final_iter = threaded_CGNE_MdagM_plus_shift < double >(psi[shift], src, mass[shift], bfm_d);
    if (final_iter == -1) {
      cps::ERR.General ("mixed_cg", "threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction", "final inversion for pole %d failed to converge!", shift);
    }
    iter += final_iter;
    bfm_d.residual = restore_resid;
  }

  if (single) {
    // Caller asked for the summed solution in psi[0]
    for (int s = 1; s < nshift; s++) {
      bfm_d.axpy (psi[0], psi[s], psi[0], 1.0);
    }
  }

  bfm_d.threadedFreeFermion (src_d);
  bfm_f.threadedFreeFermion (src_f);
  bfm_d.threadedFreeFermion (tv1_d);
  bfm_d.threadedFreeFermion (tv2_d);
  bfm_f.threadedFreeFermion (sol_f);

  gettimeofday (&tstop, NULL);
  timersub (&tstop, &tstart, &tdiff);
  if (bfm_d.isBoss () && !me)
    printf ("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: finishing up time %d.%6.6d s\n", tdiff.tv_sec, tdiff.tv_usec);
  return iter;
}

//MaybeOK, but MooeeInv with Gparity should be checked. Disabling for now
//#ifndef BFM_GPARITY
#if 0
// (DISABLED: older even-odd-preconditioned Mdag inverter, kept for reference)
inline int
  threaded_cg_mixed_Mdag (Fermion_t sol[2], Fermion_t src[2],
                          bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f,
                          int max_cycle, cps::InverterType itype = cps::CG,
                          // the following parameters are for deflation
                          multi1d < Fermion_t[2] > *evec = NULL,
                          multi1d < float >*eval = NULL, int N = 0)
{
  int me = bfm_d.thread_barrier ();
  Fermion_t be = bfm_d.threadedAllocFermion ();
  Fermion_t bo = bfm_d.threadedAllocFermion ();
  Fermion_t ta = bfm_d.threadedAllocFermion ();
  Fermion_t tb = bfm_d.threadedAllocFermion ();
  double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]);
  if (bfm_d.isBoss () && !me) {
    printf ("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
    printf ("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
  }
  // eo preconditioning
  // CGdiagonalMee == 2 has an extra Moo^{\dag-1}
  if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
    bfm_d.MooeeInv (src[Even], ta, DaggerYes);  // ta == Mee^{\dag-1} src[e]
    bfm_d.Meo (ta, tb, Odd, DaggerYes);         // tb == Moe^\dag Mee^{\dag-1} src[e]
    bfm_d.axpy (bo, tb, src[Odd], -1.0);        // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
  } else if
  // (continuation of the DISABLED #if 0 threaded_cg_mixed_Mdag above)
  (bfm_d.CGdiagonalMee == 2) {
    bfm_d.MooeeInv (src[Even], ta, DaggerYes);
    bfm_d.Meo (ta, tb, Odd, DaggerYes);
    bfm_d.axpy (tb, tb, src[Odd], -1.0);
    bfm_d.MooeeInv (tb, bo, DaggerYes);  // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  } else {
    printf ("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
    exit (-1);
  }

  // There seems to be no easy way to use an initial guess for this
  // inversion, so just set the guess to zero.
  bfm_d.set_zero (ta);

  // ta = (Mprec^\dag Mprec)^{-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  int iter = threaded_cg_mixed_MdagM (ta, bo, bfm_d, bfm_f, max_cycle, itype, evec, eval, N);

  bfm_d.Mprec (ta, sol[Odd], tb, DaggerNo);  // sol[o] = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])

  // For CGdiagonalMee == 1 we need to multiply the odd
  // solution by MooInv^d
  if (bfm_d.CGdiagonalMee == 1) {
    bfm_d.MooeeInv (sol[Odd], ta, DaggerYes, Odd);
    bfm_d.copy (sol[Odd], ta);
  }

  bfm_d.Meo (sol[Odd], ta, Even, DaggerYes);  // ta == Meo^\dag sol[o]
  bfm_d.axpy (tb, ta, src[Even], -1.0);       // tb == src[e] - Meo^\dag sol[o]
  bfm_d.MooeeInv (tb, sol[Even], DaggerYes);  // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])

  double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]);

  // compute final residual
  Fermion_t tmp[2] = { be, bo };
  bfm_d.Munprec (sol, tmp, ta, DaggerYes);
  double ndiff = 0.;
  for (int i = 0; i < 2; ++i) {
    bfm_d.axpy (tb, tmp[i], src[i], -1.0);
    ndiff += bfm_d.norm (tb);
  }
  if (bfm_d.isBoss () && !me) {
    printf ("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n", nsol, sqrt (ndiff / nsrc));
  }

  bfm_d.threadedFreeFermion (be);
  bfm_d.threadedFreeFermion (bo);
  bfm_d.threadedFreeFermion (ta);
  bfm_d.threadedFreeFermion (tb);
  return iter;
}

// Inverts unpreconditioned Mdag by using preconditioned MMdag
// as the inner solver. This allows us to make use of the initial
// guess (so sol needs to be initialized to something reasonable
// before calling this function).
// (DISABLED: older variant, kept for reference inside the #if 0 block)
inline int
  threaded_cg_mixed_Mdag_guess (Fermion_t sol[2], Fermion_t src[2],
                                bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f,
                                int max_cycle, cps::InverterType itype = cps::CG,
                                // the following parameters are for deflation
                                multi1d < Fermion_t[2] > *evec = NULL,
                                multi1d < float >*eval = NULL, int N = 0)
{
  int me = bfm_d.thread_barrier ();
  Fermion_t be = bfm_d.threadedAllocFermion ();
  Fermion_t bo = bfm_d.threadedAllocFermion ();
  Fermion_t ta = bfm_d.threadedAllocFermion ();
  Fermion_t tb = bfm_d.threadedAllocFermion ();
  double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]);
  if (bfm_d.isBoss () && !me) {
    printf ("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
    printf ("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
  }
  // eo preconditioning
  // CGdiagonalMee == 2 has an extra Moo^{\dag-1}
  if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
    bfm_d.MooeeInv (src[Even], ta, DaggerYes);  // ta == Mee^{\dag-1} src[e]
    bfm_d.Meo (ta, tb, Odd, DaggerYes);         // tb == Moe^\dag Mee^{\dag-1} src[e]
    bfm_d.axpy (bo, tb, src[Odd], -1.0);        // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
  } else if (bfm_d.CGdiagonalMee == 2) {
    bfm_d.MooeeInv (src[Even], ta, DaggerYes);
    bfm_d.Meo (ta, tb, Odd, DaggerYes);
    bfm_d.axpy (tb, tb, src[Odd], -1.0);
    bfm_d.MooeeInv (tb, bo, DaggerYes);  // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  } else {
    printf ("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
    exit (-1);
  }

  // for CGdiagonalMee == 1 the guess needs to get multiplied by Moo^\dag
  if (bfm_d.CGdiagonalMee == 1) {
    bfm_d.Mooee (sol[Odd], tb, DaggerYes, Odd);
    bfm_d.copy (sol[Odd], tb);
  }

  // ta = Mprec bo
  //    = Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  bfm_d.Mprec (bo, ta, tb, DaggerNo);

  // sol[o] = (Mprec Mprec^\dag)^{-1} ta
  //        = (Mprec Mprec^\dag)^{-1} Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  //        = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
  int iter = threaded_cg_mixed_MMdag (sol[Odd], ta, bfm_d,
                                      /* (continuation of the DISABLED #if 0 Mdag_guess call) */
                                      bfm_f, max_cycle, itype, evec, eval, N);

  // For CGdiagonalMee == 1 we need to multiply the odd
  // solution by MooInv^d
  if (bfm_d.CGdiagonalMee == 1) {
    bfm_d.MooeeInv (sol[Odd], ta, DaggerYes, Odd);
    bfm_d.copy (sol[Odd], ta);
  }

  bfm_d.Meo (sol[Odd], ta, Even, DaggerYes);  // ta == Meo^\dag sol[o]
  bfm_d.axpy (tb, ta, src[Even], -1.0);       // tb == src[e] - Meo^\dag sol[o]
  bfm_d.MooeeInv (tb, sol[Even], DaggerYes);  // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])

  double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]);

  // compute final residual
  Fermion_t tmp[2] = { be, bo };
  bfm_d.Munprec (sol, tmp, ta, DaggerYes);
  double ndiff = 0.;
  for (int i = 0; i < 2; ++i) {
    bfm_d.axpy (tb, tmp[i], src[i], -1.0);
    ndiff += bfm_d.norm (tb);
  }
  if (bfm_d.isBoss () && !me) {
    printf ("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n", nsol, sqrt (ndiff / nsrc));
  }

  bfm_d.threadedFreeFermion (be);
  bfm_d.threadedFreeFermion (bo);
  bfm_d.threadedFreeFermion (ta);
  bfm_d.threadedFreeFermion (tb);
  return iter;
}
}
#endif

//-----------------------------------------------------------------------------
// Active version: inverts unpreconditioned Mdag via even-odd preconditioning
// with threaded_cg_mixed_MdagM as the inner solver. sol/src are {even,odd}
// checkerboard halves. Returns the inner solver's iteration count. The inner
// guess is always zero (see comment below); the unpreconditioned true residual
// is computed and printed at the end by the boss thread.
//-----------------------------------------------------------------------------
inline int threaded_cg_mixed_Mdag(Fermion_t sol[2], Fermion_t src[2],
                                  bfm_evo<double> &bfm_d, bfm_evo<float> &bfm_f,
                                  int max_cycle,
                                  cps::InverterType itype = cps::CG,
                                  // the following parameters are for deflation
                                  multi1d<Fermion_t[2]> *evec = NULL,
                                  multi1d<float> *eval = NULL,
                                  int N = 0)
{
    int me = bfm_d.thread_barrier();

    Fermion_t be = bfm_d.threadedAllocFermion();
    Fermion_t bo = bfm_d.threadedAllocFermion();
    Fermion_t ta = bfm_d.threadedAllocFermion();
    Fermion_t tb = bfm_d.threadedAllocFermion();

    double nsrc = bfm_d.norm(src[0]) + bfm_d.norm(src[1]);
    if (bfm_d.isBoss() && !me) {
        printf("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
        printf("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
    }

    // eo preconditioning
    // CGdiagonalMee == 2 has an extra Moo^{\dag-1}
    if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
        bfm_d.MooeeInv(src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
        bfm_d.Meo(ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
        bfm_d.axpy(bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
    } else if (bfm_d.CGdiagonalMee == 2) {
        bfm_d.MooeeInv(src[Even], ta, DaggerYes);
        bfm_d.Meo(ta, tb, Odd, DaggerYes);
        bfm_d.axpy(tb, tb, src[Odd], -1.0);
        bfm_d.MooeeInv(tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    } else {
        printf("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
        exit(-1);
    }

    // There seems to be no easy way to use an initial guess for this
    // inversion, so just set the guess to zero.
    bfm_d.set_zero(ta);

    // ta = (Mprec^\dag Mprec)^{-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    int iter = threaded_cg_mixed_MdagM(ta, bo, bfm_d, bfm_f, max_cycle, itype, evec, eval, N);

    bfm_d.Mprec(ta, sol[Odd], tb, DaggerNo); // sol[o] = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])

    // For CGdiagonalMee == 1 we need to multiply the odd
    // solution by MooInv^d
    if (bfm_d.CGdiagonalMee == 1) {
        bfm_d.MooeeInv(sol[Odd], ta, DaggerYes, Odd);
        bfm_d.copy(sol[Odd], ta);
    }

    bfm_d.Meo(sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
    bfm_d.axpy(tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
    bfm_d.MooeeInv(tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])

    double nsol = bfm_d.norm(sol[0]) + bfm_d.norm(sol[1]);

    // compute final residual
    Fermion_t tmp[2] = { be, bo };
    bfm_d.Munprec(sol, tmp, ta, DaggerYes);
    double ndiff = 0.;
    for (int i = 0; i < 2; ++i) {
        bfm_d.axpy(tb, tmp[i], src[i], -1.0);
        ndiff += bfm_d.norm(tb);
    }
    if (bfm_d.isBoss() && !me) {
        printf("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n", nsol, sqrt(ndiff / nsrc));
    }

    bfm_d.threadedFreeFermion(be);
    bfm_d.threadedFreeFermion(bo);
    bfm_d.threadedFreeFermion(ta);
    bfm_d.threadedFreeFermion(tb);
    return iter;
}

// Inverts unpreconditioned Mdag by using preconditioned MMdag
// as the inner solver.
// This allows us to make use of the initial
// guess (so sol needs to be initialized to something reasonable
// before calling this function).
inline int threaded_cg_mixed_Mdag_guess(Fermion_t sol[2], Fermion_t src[2],
                                        bfm_evo<double> &bfm_d, bfm_evo<float> &bfm_f,
                                        int max_cycle,
                                        cps::InverterType itype = cps::CG,
                                        // the following parameters are for deflation
                                        multi1d<Fermion_t[2]> *evec = NULL,
                                        multi1d<float> *eval = NULL,
                                        int N = 0)
{
    int me = bfm_d.thread_barrier();

    Fermion_t be = bfm_d.threadedAllocFermion();
    Fermion_t bo = bfm_d.threadedAllocFermion();
    Fermion_t ta = bfm_d.threadedAllocFermion();
    Fermion_t tb = bfm_d.threadedAllocFermion();

    double nsrc = bfm_d.norm(src[0]) + bfm_d.norm(src[1]);
    if (bfm_d.isBoss() && !me) {
        printf("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
        printf("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
    }

    // eo preconditioning
    // CGdiagonalMee == 2 has an extra Moo^{\dag-1}
    if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
        bfm_d.MooeeInv(src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
        bfm_d.Meo(ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
        bfm_d.axpy(bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
    } else if (bfm_d.CGdiagonalMee == 2) {
        bfm_d.MooeeInv(src[Even], ta, DaggerYes);
        bfm_d.Meo(ta, tb, Odd, DaggerYes);
        bfm_d.axpy(tb, tb, src[Odd], -1.0);
        bfm_d.MooeeInv(tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    } else {
        printf("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
        exit(-1);
    }

    // for CGdiagonalMee == 1 the guess needs to get multiplied by Moo^\dag
    if (bfm_d.CGdiagonalMee == 1) {
        bfm_d.Mooee(sol[Odd], tb, DaggerYes, Odd);
        bfm_d.copy(sol[Odd], tb);
    }

    // ta = Mprec bo
    //    = Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    bfm_d.Mprec(bo, ta, tb, DaggerNo);

    // sol[o] = (Mprec Mprec^\dag)^{-1} ta
    //        = (Mprec Mprec^\dag)^{-1} Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    //        = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
    int iter = threaded_cg_mixed_MMdag(sol[Odd], ta, bfm_d, bfm_f, max_cycle, itype, evec, eval, N);

    // For CGdiagonalMee == 1 we need to multiply the odd
    // solution by MooInv^d
    if (bfm_d.CGdiagonalMee == 1) {
        bfm_d.MooeeInv(sol[Odd], ta, DaggerYes, Odd);
        bfm_d.copy(sol[Odd], ta);
    }

    bfm_d.Meo(sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
    bfm_d.axpy(tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
    bfm_d.MooeeInv(tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])

    double nsol = bfm_d.norm(sol[0]) + bfm_d.norm(sol[1]);

    // compute final residual
    Fermion_t tmp[2] = { be, bo };
    bfm_d.Munprec(sol, tmp, ta, DaggerYes);
    double ndiff = 0.;
    for (int i = 0; i < 2; ++i) {
        bfm_d.axpy(tb, tmp[i], src[i], -1.0);
        ndiff += bfm_d.norm(tb);
    }
    if (bfm_d.isBoss() && !me) {
        printf("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n", nsol, sqrt(ndiff / nsrc));
    }

    bfm_d.threadedFreeFermion(be);
    bfm_d.threadedFreeFermion(bo);
    bfm_d.threadedFreeFermion(ta);
    bfm_d.threadedFreeFermion(tb);
    return iter;
}
}  // end of namespace mixed_cg

CPS_START_NAMESPACE
#if 1
//Controls the version of the multi-shift algorithm used. The user can define different versions to use in different environments, for example if you
//want to use an approximate method within the molecular dynamics evolution.
//The current environment must be set manually.
// It defaults to Generic.
class MultiShiftCGcontroller
{
public:
  // Multi-shift solver variants selectable per environment.
  enum Mode {
    SINGLE_PREC,
    DOUBLE_PREC,
    SINGLE_PREC_PLUS_OUTER_DEFECT_CORRECTION_LOOP,  //Single precision multi-shift followed by single precision restarted defect correction loop over poles
    SINGLE_PREC_AS_DOUBLE_PREC_GUESS,  //Single precision multi-shift followed by double precision multi-shift using the single prec results as a guess (using Osborn's method)
    SINGLE_PREC_RESTARTED_AS_DOUBLE_PREC_GUESS,  //Restarted single precision multi-shift with defect correction followed by double precision multi-shift using the single prec results as a guess (also using Osborn's method)
    SINGLE_PREC_RELIABLE_UPDATE_PLUS_OUTER_DEFECT_CORRECTION_LOOP,  //Single precision multi-shift with reliable update followed by single precision restarted defect correction loop over poles
    NMultiShiftCGMode
  };
  // Execution contexts for which a Mode may be configured independently.
  enum Environment {
    MolecularDynamics,
    EnergyCalculation,
    Heatbath,
    Generic,
    NMultiShiftEnvironment
  };

private:
  Mode environ_mode[(int) NMultiShiftEnvironment];  // mode chosen per environment
  Environment current_environment;
  double minimum_single_prec_residual;  //For variants with an initial single precision solve, the stopping conditions are set equal to the larger of the double precision residual and this bound. Does not apply
  //to the reliable update version.
  int reliable_update_freq;  //Used in versions with reliable update
  int max_defect_correction_cycles;

public:
  MultiShiftCGcontroller ():current_environment (Generic),
    minimum_single_prec_residual (1e-08),
    reliable_update_freq (100), max_defect_correction_cycles (500)
  {
    // All environments default to the safe full double precision solver.
    for (int i = 0; i < (int) NMultiShiftEnvironment; i++)
      environ_mode[i] = DOUBLE_PREC;
  }

  void setEnvironmentMode (const Environment & environ, const Mode & mode)
  {
    environ_mode[(int) environ] = mode;
  }
  void setEnvironment (const Environment & environ)
  {
    current_environment = environ;
  }
  const Mode & getMode () const
  {
    return environ_mode[(int) current_environment];
  }
  void setMinimumSinglePrecResidual (const double &r)
  {
    minimum_single_prec_residual = r;
  }
  void setReliableUpdateFreq (const int &f)
  {
    reliable_update_freq = f;
  }
  void setMaximumDefectCorrectionCycles (const int &c)
  {
    max_defect_correction_cycles = c;
  }

  // Dispatch the multi-shift inversion of (MdagM + shift[s]) to the backend
  // selected by the Mode for the current Environment; returns that backend's
  // iteration count. Spawns its own OpenMP parallel region.
  // NOTE(review): 'iter' is assigned by every thread of the parallel regions
  // below, and is left uninitialized on the final error branch if ERR.General
  // returns; confirm ERR.General is non-returning.
  int MInv (Fermion_t * sol_multi, Fermion_t src, Float * shift, int Nshift,
            Float * mresidual, Float * alpha, int single,
            bfm_evo < double >&bd, bfm_evo < float >&bf)
  {
    const Mode & mode = getMode ();
    int iter;
    if (mode == SINGLE_PREC) {
      //Note, this uses the residuals specified in the cg_arg without modification
#pragma omp parallel
      {
        Fermion_t src_f = bf.threadedAllocFermion ();
        Fermion_t sol_f[Nshift];
        for (int i = 0; i < Nshift; i++)
          sol_f[i] = bf.threadedAllocFermion ();
        mixed_cg::threaded_convFermion (src_f, src, bf, bd);
        mixed_cg::switch_comm (bf, bd);
        iter = bf.CGNE_prec_MdagM_multi_shift (sol_f, src_f, shift, alpha, Nshift, mresidual, single);
        mixed_cg::switch_comm (bd, bf);
        for (int i = 0; i < Nshift; i++) {
          mixed_cg::threaded_convFermion (sol_multi[i], sol_f[i], bd, bf);
          bf.threadedFreeFermion (sol_f[i]);
        }
        bf.threadedFreeFermion (src_f);
      }
    } else if (mode == DOUBLE_PREC) {
#pragma omp parallel
      {
        iter = bd.CGNE_prec_MdagM_multi_shift (sol_multi, src, shift, alpha, Nshift, mresidual, single);
      }
    } else if (mode == SINGLE_PREC_PLUS_OUTER_DEFECT_CORRECTION_LOOP) {
      double fresidual[Nshift];  //residuals for initial single prec solve
      for (int s = 0; s < Nshift; s++)
        fresidual[s] = (mresidual[s] >= minimum_single_prec_residual ? mresidual[s] : minimum_single_prec_residual);
#pragma omp parallel
      {
        iter = mixed_cg::threaded_cg_mixed_defect_correction_multi_shift_MdagM (sol_multi, src, shift, alpha, bd, bf, Nshift, mresidual, fresidual, single, max_defect_correction_cycles);
      }
    } else if (mode == SINGLE_PREC_AS_DOUBLE_PREC_GUESS) {
      double fresidual[Nshift];  //residuals for initial single prec solve
      for (int s = 0; s < Nshift; s++)
        fresidual[s] = (mresidual[s] >= minimum_single_prec_residual ? mresidual[s] : minimum_single_prec_residual);
#pragma omp parallel
      {
        iter = mixed_cg::threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM (sol_multi, src, shift, alpha, Nshift, mresidual, fresidual, single, bd, bf);
      }
    } else if (mode == SINGLE_PREC_RESTARTED_AS_DOUBLE_PREC_GUESS) {
      double fresidual[Nshift];  //residuals for initial single prec solve
      for (int s = 0; s < Nshift; s++)
        fresidual[s] = (mresidual[s] >= minimum_single_prec_residual ? mresidual[s] : minimum_single_prec_residual);
#pragma omp parallel
      {
        iter = mixed_cg::threaded_cg_mixed_restarted_multi_shift_MdagM (sol_multi, src, shift, alpha, Nshift, mresidual, fresidual, single, bd, bf, max_defect_correction_cycles);
      }
    } else if (mode == SINGLE_PREC_RELIABLE_UPDATE_PLUS_OUTER_DEFECT_CORRECTION_LOOP) {
#pragma omp parallel
      {
        iter = mixed_cg::threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction (sol_multi, src, shift, alpha, Nshift, mresidual, single, bf, bd, reliable_update_freq, -1, max_defect_correction_cycles);
      }
    } else
      ERR.General ("_MultiShiftCGargs", "MInv(..)", "Unknown multi-shift mode\n");
    return iter;
  }
};

extern MultiShiftCGcontroller MultiShiftController;  //global instance (created in fbfm.C)
#endif
CPS_END_NAMESPACE
#endif
/* ===== concatenated source boundary: jacobi25_2d-a.pluto.c (Pluto-generated Jacobi 2D stencil benchmark) ===== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 16000L #define T 16000L #endif #define NUM_FP_OPS 10 /* Define our arrays */ double A[2][N][N]; double total=0; double sum_err_sqr=0; int chtotal=0; int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[0][i][j] = 1.0 * (rand() % BASE); } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 8000L #undef T #define T 8000 /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. 
The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 5) && (T >= 1)) { for (t1=-1;t1<=floord(T-1,2);t1++) { lbp=ceild(t1,2); ubp=min(floord(4*T+N-5,16),floord(8*t1+N+4,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-63,64)),ceild(16*t2-N-505,512));t3<=min(min(floord(4*T+N-5,512),floord(8*t1+N+11,512)),floord(4*t1+8*t2+N+9,512));t3++) { for (t4=max(max(0,ceild(16*t2-N+1,4)),2*t1);t4<=floord(16*t2-N+2,4);t4++) { for (t5=16*t2;t5<=4*t4+N-1;t5++) { for (t6=max(512*t3,4*t4+4);t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if ((t1 == 2*t2) && (t1 >= ceild(512*t3-N+1,8))) { for (t6=max(512*t3,8*t1+4);t6<=min(512*t3+511,8*t1+N-1);t6++) { if (t1%2 == 0) { A[0][2][(-8*t1+t6-2)] = 0.04*( A[1][2 -2][(-8*t1+t6-2)-2] + A[1][2 -2][(-8*t1+t6-2)-1] + A[1][2 -2][(-8*t1+t6-2)] + A[1][2 -2][(-8*t1+t6-2)+1] + A[1][2 -2][(-8*t1+t6-2)+2] + A[1][2 -1][(-8*t1+t6-2)-2] + 
A[1][2 -1][(-8*t1+t6-2)-1] + A[1][2 -1][(-8*t1+t6-2)] + A[1][2 -1][(-8*t1+t6-2)+1] + A[1][2 -1][(-8*t1+t6-2)+2] + A[1][2 ][(-8*t1+t6-2)-2] + A[1][2 ][(-8*t1+t6-2)-1] + A[1][2 ][(-8*t1+t6-2)] + A[1][2 ][(-8*t1+t6-2)+1] + A[1][2 ][(-8*t1+t6-2)+2] + A[1][2 +1][(-8*t1+t6-2)-2] + A[1][2 +1][(-8*t1+t6-2)-1] + A[1][2 +1][(-8*t1+t6-2)] + A[1][2 +1][(-8*t1+t6-2)+1] + A[1][2 +1][(-8*t1+t6-2)+2] + A[1][2 +2][(-8*t1+t6-2)-2] + A[1][2 +2][(-8*t1+t6-2)-1] + A[1][2 +2][(-8*t1+t6-2)] + A[1][2 +2][(-8*t1+t6-2)+1] + A[1][2 +2][(-8*t1+t6-2)+2] );; } } } for (t4=max(max(max(max(0,ceild(16*t2-N+3,4)),ceild(512*t3-N+1,4)),2*t1),4*t1-4*t2+1);t4<=floord(512*t3-N+2,4);t4++) { for (t5=max(max(16*t2,4*t4+4),-16*t1+16*t2+8*t4-11);t5<=min(16*t2+15,-16*t1+16*t2+8*t4+4);t5++) { for (t6=512*t3;t6<=4*t4+N-1;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if ((N >= 7) && (t1 == 2*t2) && (t1 <= min(floord(T-2,2),floord(512*t3-N+509,8))) && (t1 >= 64*t3)) { for (t5=8*t1+6;t5<=8*t1+7;t5++) { for (t6=8*t1+6;t6<=8*t1+N+1;t6++) { if (t1%2 == 0) { 
A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } } for (t6=8*t1+6;t6<=8*t1+7;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] + A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=8*t1+N+1;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] 
+ A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+N+2;t6<=min(512*t3+511,8*t1+N+3);t6++) { if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 
+1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+9;t5<=min(8*t1+12,8*t1+N+3);t5++) { for (t6=8*t1+8;t6<=min(512*t3+511,8*t1+N+3);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(T-2,2),floord(512*t3-N+509,8)),64*t3-2)) && (t1 >= ceild(512*t3-N-1,8))) { for (t5=8*t1+6;t5<=8*t1+7;t5++) { for (t6=512*t3;t6<=8*t1+N+1;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) 
][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } } for (t6=512*t3;t6<=8*t1+N+1;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] + A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 
+2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+N+2;t6<=min(512*t3+511,8*t1+N+3);t6++) { if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+9;t5<=8*t1+12;t5++) { for (t6=512*t3;t6<=min(512*t3+511,8*t1+N+3);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + 
A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(T-3,2),floord(512*t3-N+505,8)),64*t3-2)) && (t1 >= ceild(512*t3-N-5,8))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=512*t3;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + 
A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + 
A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= floord(T-2,2)) && (t1 >= max(ceild(512*t3-N+510,8),64*t3))) { for (t5=8*t1+6;t5<=8*t1+7;t5++) { for (t6=8*t1+6;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } } for (t6=8*t1+6;t6<=8*t1+7;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] + 
A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] + A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] 
+ A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+9;t5<=8*t1+12;t5++) { for (t6=8*t1+8;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= min(floord(T-2,2),64*t3-2)) && (t1 >= ceild(512*t3-N+510,8))) { for (t5=8*t1+6;t5<=8*t1+7;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) 
][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } } for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][4][(-8*t1+t6-4)] = 0.04*( A[0][4 -2][(-8*t1+t6-4)-2] + A[0][4 -2][(-8*t1+t6-4)-1] + A[0][4 -2][(-8*t1+t6-4)] + A[0][4 -2][(-8*t1+t6-4)+1] + A[0][4 -2][(-8*t1+t6-4)+2] + A[0][4 -1][(-8*t1+t6-4)-2] + A[0][4 -1][(-8*t1+t6-4)-1] + A[0][4 -1][(-8*t1+t6-4)] + A[0][4 -1][(-8*t1+t6-4)+1] + A[0][4 -1][(-8*t1+t6-4)+2] + A[0][4 ][(-8*t1+t6-4)-2] + A[0][4 ][(-8*t1+t6-4)-1] + A[0][4 ][(-8*t1+t6-4)] + A[0][4 ][(-8*t1+t6-4)+1] + A[0][4 ][(-8*t1+t6-4)+2] + A[0][4 +1][(-8*t1+t6-4)-2] + A[0][4 +1][(-8*t1+t6-4)-1] + A[0][4 +1][(-8*t1+t6-4)] + A[0][4 +1][(-8*t1+t6-4)+1] + A[0][4 +1][(-8*t1+t6-4)+2] + A[0][4 +2][(-8*t1+t6-4)-2] + A[0][4 +2][(-8*t1+t6-4)-1] + A[0][4 +2][(-8*t1+t6-4)] + A[0][4 +2][(-8*t1+t6-4)+1] + A[0][4 +2][(-8*t1+t6-4)+2] );; } if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 -1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] 
);; } } for (t5=8*t1+9;t5<=8*t1+12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= min(floord(T-3,2),64*t3-2)) && (t1 >= ceild(512*t3-N+506,8))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + 
A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + 
A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((N == 6) && (t1 == 2*t2) && (t1 <= floord(T-2,2))) { for (t5=8*t1+6;t5<=8*t1+7;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } } for (t6=8*t1+8;t6<=8*t1+9;t6++) { if (t1%2 == 0) { A[0][2][(-8*t1+t6-6)] = 0.04*( A[1][2 -2][(-8*t1+t6-6)-2] + A[1][2 -2][(-8*t1+t6-6)-1] + A[1][2 -2][(-8*t1+t6-6)] + A[1][2 -2][(-8*t1+t6-6)+1] + A[1][2 -2][(-8*t1+t6-6)+2] + A[1][2 -1][(-8*t1+t6-6)-2] + A[1][2 -1][(-8*t1+t6-6)-1] + A[1][2 -1][(-8*t1+t6-6)] + A[1][2 -1][(-8*t1+t6-6)+1] + A[1][2 
-1][(-8*t1+t6-6)+2] + A[1][2 ][(-8*t1+t6-6)-2] + A[1][2 ][(-8*t1+t6-6)-1] + A[1][2 ][(-8*t1+t6-6)] + A[1][2 ][(-8*t1+t6-6)+1] + A[1][2 ][(-8*t1+t6-6)+2] + A[1][2 +1][(-8*t1+t6-6)-2] + A[1][2 +1][(-8*t1+t6-6)-1] + A[1][2 +1][(-8*t1+t6-6)] + A[1][2 +1][(-8*t1+t6-6)+1] + A[1][2 +1][(-8*t1+t6-6)+2] + A[1][2 +2][(-8*t1+t6-6)-2] + A[1][2 +2][(-8*t1+t6-6)-1] + A[1][2 +2][(-8*t1+t6-6)] + A[1][2 +2][(-8*t1+t6-6)+1] + A[1][2 +2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+8;t6<=8*t1+9;t6++) { if (t1%2 == 0) { A[0][3][(-8*t1+t6-6)] = 0.04*( A[1][3 -2][(-8*t1+t6-6)-2] + A[1][3 -2][(-8*t1+t6-6)-1] + A[1][3 -2][(-8*t1+t6-6)] + A[1][3 -2][(-8*t1+t6-6)+1] + A[1][3 -2][(-8*t1+t6-6)+2] + A[1][3 -1][(-8*t1+t6-6)-2] + A[1][3 -1][(-8*t1+t6-6)-1] + A[1][3 -1][(-8*t1+t6-6)] + A[1][3 -1][(-8*t1+t6-6)+1] + A[1][3 -1][(-8*t1+t6-6)+2] + A[1][3 ][(-8*t1+t6-6)-2] + A[1][3 ][(-8*t1+t6-6)-1] + A[1][3 ][(-8*t1+t6-6)] + A[1][3 ][(-8*t1+t6-6)+1] + A[1][3 ][(-8*t1+t6-6)+2] + A[1][3 +1][(-8*t1+t6-6)-2] + A[1][3 +1][(-8*t1+t6-6)-1] + A[1][3 +1][(-8*t1+t6-6)] + A[1][3 +1][(-8*t1+t6-6)+1] + A[1][3 +1][(-8*t1+t6-6)+2] + A[1][3 +2][(-8*t1+t6-6)-2] + A[1][3 +2][(-8*t1+t6-6)-1] + A[1][3 +2][(-8*t1+t6-6)] + A[1][3 +2][(-8*t1+t6-6)+1] + A[1][3 +2][(-8*t1+t6-6)+2] );; } } } if (N <= 6) { for (t4=max(ceild(16*t2-N+3,4),ceild(16*t1-16*t2+N-1,4));t4<=min(min(floord(16*t1-16*t2+N+10,4),T-1),4*t2+2);t4++) { for (t5=4*t4+2;t5<=4*t4+N-3;t5++) { for (t6=4*t4+2;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + 
A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=max(4*t4+4,-16*t1+16*t2+8*t4-11);t5<=4*t4+N-1;t5++) { for (t6=4*t4+4;t6<=4*t4+N-1;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } } for (t4=max(max(max(0,ceild(512*t3-N+3,4)),2*t1),4*t1-4*t2+4);t4<=min(min(min(min(floord(512*t3-N+513,4),floord(16*t1-16*t2+N-3,4)),T-1),2*t1+1),128*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+8*t4;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + 
A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + 
A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=-16*t1+16*t2+8*t4+1;t5<=min(4*t4+N-1,-16*t1+16*t2+8*t4+4);t5++) { for (t6=512*t3;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+3,4)),2*t1);t4<=min(min(min(min(floord(8*t1+N-11,4),floord(512*t3-N+513,4)),T-1),2*t1+1),128*t3-1);t4++) { for 
(t5=8*t1+8;t5<=-8*t1+8*t4+8;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + 
A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t5=-8*t1+8*t4+9;t5<=min(-8*t1+8*t4+12,4*t4+N-1);t5++) { for (t6=512*t3;t6<=min(512*t3+511,4*t4+N-1);t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + 
A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } } } for (t4=max(max(max(0,ceild(512*t3-N+514,4)),2*t1),4*t1-4*t2+4);t4<=min(min(min(floord(16*t1-16*t2+N-3,4),T-1),2*t1+1),128*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+8*t4;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) 
][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=-16*t1+16*t2+8*t4+1;t5<=min(4*t4+N-1,-16*t1+16*t2+8*t4+4);t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+514,4)),2*t1);t4<=min(min(T-1,2*t1+1),128*t3-1);t4++) { for (t5=8*t1+8;t5<=-8*t1+8*t4+8;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + 
A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t5=-8*t1+8*t4+9;t5<=-8*t1+8*t4+12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + 
A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } } } if ((t1 == 2*t2-1) && (t1 <= min(min(floord(T-3,2),floord(512*t3-N+505,8)),64*t3-3)) && (t1 >= ceild(512*t3-N-5,8))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=512*t3;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=512*t3;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 -2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + A[0][4 +1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+23;t5++) { for (t6=512*t3;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if ((t1+1)%2 == 0) { 
A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + 
A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((t1 == 2*t2-1) && (t1 <= min(floord(T-3,2),64*t3-3)) && (t1 >= ceild(512*t3-N+506,8))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 -2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + A[0][4 
+1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+23;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + 
A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((N >= 7) && (N <= 9) && (t1 == 2*t2) && (t1 <= floord(T-3,2)) && (t1 >= 64*t3)) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+N+5;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + 
A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + 
A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + 
A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } for (t5=8*t1+N+6;t5<=min(8*t1+15,8*t1+N+7);t5++) { for (t6=8*t1+12;t6<=min(512*t3+511,8*t1+N+7);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((N == 10) && (t1 == 2*t2) && (t1 <= min(floord(T-3,2),64*t3+60)) && (t1 >= 64*t3)) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+15;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+15;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=8*t1+15;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+16;t6<=8*t1+17;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + 
A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((N >= 11) && (t1 == 2*t2) && (t1 <= min(floord(T-3,2),floord(512*t3-N+505,8))) && (t1 >= 64*t3)) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+15;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=8*t1+N+5;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + 
A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((t1 == 2*t2) && (t1 <= floord(T-3,2)) && (t1 >= max(ceild(512*t3-N+506,8),64*t3))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t5=8*t1+12;t5<=8*t1+15;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + 
A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if (t1%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + 
A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } for (t4=max(max(2*t1,128*t3),4*t1-4*t2+4);t4<=min(min(min(floord(512*t3-N+513,4),floord(16*t1-16*t2+N-3,4)),T-1),2*t1+1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+8*t4;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + 
A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=-16*t1+16*t2+8*t4+1;t5<=min(4*t4+N-1,-16*t1+16*t2+8*t4+4);t5++) { for (t6=4*t4+4;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] 
+ A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if (t1 == 2*t2-1) { for (t4=max(2*t1,128*t3);t4<=min(min(min(floord(8*t1+N-11,4),floord(512*t3-N+513,4)),T-1),2*t1+1);t4++) { for (t5=8*t1+8;t5<=-8*t1+8*t4+8;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t6=4*t4+4;t6<=4*t4+N-3;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + 
A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + 
A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t5=-8*t1+8*t4+9;t5<=min(-8*t1+8*t4+12,4*t4+N-1);t5++) { for (t6=4*t4+4;t6<=min(512*t3+511,4*t4+N-1);t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } } } for (t4=max(max(max(ceild(512*t3-N+514,4),2*t1),128*t3),4*t1-4*t2+4);t4<=min(min(min(floord(16*t1-16*t2+N-3,4),T-1),2*t1+1),128*t3+126);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+8*t4;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + 
A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=-16*t1+16*t2+8*t4+1;t5<=min(4*t4+N-1,-16*t1+16*t2+8*t4+4);t5++) { for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if (t1 == 2*t2-1) { for (t4=max(max(max(ceild(8*t1-N+11,4),ceild(512*t3-N+514,4)),2*t1),128*t3);t4<=min(min(T-1,2*t1+1),128*t3+126);t4++) { for 
(t5=8*t1+8;t5<=-8*t1+8*t4+8;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t6=4*t4+4;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } if ((t1+1)%2 == 0) { 
A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t5=-8*t1+8*t4+9;t5<=min(-8*t1+8*t4+12,4*t4+N-1);t5++) { for (t6=4*t4+4;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + 
A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } } } if ((N >= 22) && (t1 <= min(min(floord(T-2,2),floord(512*t3-N+502,8)),64*t3-1)) && (t1 >= max(0,ceild(512*t3-N+6,8))) && (8*t1 == 16*t2-N+6)) { for (t5=8*t1+N-6;t5<=8*t1+N+1;t5++) { for (t6=512*t3;t6<=8*t1+N+1;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=512*t3;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-4)][(-8*t1+t6-6)] = 0.04*( A[1][(N-4)-2][(-8*t1+t6-6)-2] + A[1][(N-4)-2][(-8*t1+t6-6)-1] + A[1][(N-4)-2][(-8*t1+t6-6)] + A[1][(N-4)-2][(-8*t1+t6-6)+1] + A[1][(N-4)-2][(-8*t1+t6-6)+2] + A[1][(N-4)-1][(-8*t1+t6-6)-2] + A[1][(N-4)-1][(-8*t1+t6-6)-1] + A[1][(N-4)-1][(-8*t1+t6-6)] + A[1][(N-4)-1][(-8*t1+t6-6)+1] + A[1][(N-4)-1][(-8*t1+t6-6)+2] + A[1][(N-4) ][(-8*t1+t6-6)-2] + 
A[1][(N-4) ][(-8*t1+t6-6)-1] + A[1][(N-4) ][(-8*t1+t6-6)] + A[1][(N-4) ][(-8*t1+t6-6)+1] + A[1][(N-4) ][(-8*t1+t6-6)+2] + A[1][(N-4)+1][(-8*t1+t6-6)-2] + A[1][(N-4)+1][(-8*t1+t6-6)-1] + A[1][(N-4)+1][(-8*t1+t6-6)] + A[1][(N-4)+1][(-8*t1+t6-6)+1] + A[1][(N-4)+1][(-8*t1+t6-6)+2] + A[1][(N-4)+2][(-8*t1+t6-6)-2] + A[1][(N-4)+2][(-8*t1+t6-6)-1] + A[1][(N-4)+2][(-8*t1+t6-6)] + A[1][(N-4)+2][(-8*t1+t6-6)+1] + A[1][(N-4)+2][(-8*t1+t6-6)+2] );; } } for (t6=512*t3;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-3)][(-8*t1+t6-6)] = 0.04*( A[1][(N-3)-2][(-8*t1+t6-6)-2] + A[1][(N-3)-2][(-8*t1+t6-6)-1] + A[1][(N-3)-2][(-8*t1+t6-6)] + A[1][(N-3)-2][(-8*t1+t6-6)+1] + A[1][(N-3)-2][(-8*t1+t6-6)+2] + A[1][(N-3)-1][(-8*t1+t6-6)-2] + A[1][(N-3)-1][(-8*t1+t6-6)-1] + A[1][(N-3)-1][(-8*t1+t6-6)] + A[1][(N-3)-1][(-8*t1+t6-6)+1] + A[1][(N-3)-1][(-8*t1+t6-6)+2] + A[1][(N-3) ][(-8*t1+t6-6)-2] + A[1][(N-3) ][(-8*t1+t6-6)-1] + A[1][(N-3) ][(-8*t1+t6-6)] + A[1][(N-3) ][(-8*t1+t6-6)+1] + A[1][(N-3) ][(-8*t1+t6-6)+2] + A[1][(N-3)+1][(-8*t1+t6-6)-2] + A[1][(N-3)+1][(-8*t1+t6-6)-1] + A[1][(N-3)+1][(-8*t1+t6-6)] + A[1][(N-3)+1][(-8*t1+t6-6)+1] + A[1][(N-3)+1][(-8*t1+t6-6)+2] + A[1][(N-3)+2][(-8*t1+t6-6)-2] + A[1][(N-3)+2][(-8*t1+t6-6)-1] + A[1][(N-3)+2][(-8*t1+t6-6)] + A[1][(N-3)+2][(-8*t1+t6-6)+1] + A[1][(N-3)+2][(-8*t1+t6-6)+2] );; } } } if ((t1 <= min(min(min(min(floord(T-2,2),floord(16*t2-N+5,8)),floord(512*t3-N+509,8)),2*t2-2),64*t3-1)) && (t1 >= max(max(0,ceild(16*t2-N-1,8)),ceild(512*t3-N-1,8)))) { for (t5=16*t2;t5<=8*t1+N+1;t5++) { for (t6=512*t3;t6<=8*t1+N+1;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + 
A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + 
A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=512*t3;t6<=8*t1+N+3;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } if ((N == 14) && (t1 == 2*t2-1) && (t1 == 64*t3-1) && (t1 >= 63) && (t1 <= floord(T-2,2))) { for (t5=8*t1+8;t5<=8*t1+15;t5++) { for (t6=8*t1+8;t6<=8*t1+15;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { 
A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+16;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } for (t6=8*t1+8;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][10][(-8*t1+t6-6)] = 0.04*( A[1][10 -2][(-8*t1+t6-6)-2] + A[1][10 -2][(-8*t1+t6-6)-1] + A[1][10 -2][(-8*t1+t6-6)] + A[1][10 -2][(-8*t1+t6-6)+1] + A[1][10 -2][(-8*t1+t6-6)+2] + A[1][10 -1][(-8*t1+t6-6)-2] + A[1][10 -1][(-8*t1+t6-6)-1] + A[1][10 -1][(-8*t1+t6-6)] + A[1][10 -1][(-8*t1+t6-6)+1] + A[1][10 -1][(-8*t1+t6-6)+2] + A[1][10 ][(-8*t1+t6-6)-2] + A[1][10 ][(-8*t1+t6-6)-1] + A[1][10 ][(-8*t1+t6-6)] + A[1][10 ][(-8*t1+t6-6)+1] + A[1][10 ][(-8*t1+t6-6)+2] + A[1][10 +1][(-8*t1+t6-6)-2] + A[1][10 +1][(-8*t1+t6-6)-1] + A[1][10 +1][(-8*t1+t6-6)] + A[1][10 +1][(-8*t1+t6-6)+1] + A[1][10 +1][(-8*t1+t6-6)+2] + A[1][10 +2][(-8*t1+t6-6)-2] + A[1][10 +2][(-8*t1+t6-6)-1] + A[1][10 +2][(-8*t1+t6-6)] + A[1][10 +2][(-8*t1+t6-6)+1] + A[1][10 
+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+8;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][11][(-8*t1+t6-6)] = 0.04*( A[1][11 -2][(-8*t1+t6-6)-2] + A[1][11 -2][(-8*t1+t6-6)-1] + A[1][11 -2][(-8*t1+t6-6)] + A[1][11 -2][(-8*t1+t6-6)+1] + A[1][11 -2][(-8*t1+t6-6)+2] + A[1][11 -1][(-8*t1+t6-6)-2] + A[1][11 -1][(-8*t1+t6-6)-1] + A[1][11 -1][(-8*t1+t6-6)] + A[1][11 -1][(-8*t1+t6-6)+1] + A[1][11 -1][(-8*t1+t6-6)+2] + A[1][11 ][(-8*t1+t6-6)-2] + A[1][11 ][(-8*t1+t6-6)-1] + A[1][11 ][(-8*t1+t6-6)] + A[1][11 ][(-8*t1+t6-6)+1] + A[1][11 ][(-8*t1+t6-6)+2] + A[1][11 +1][(-8*t1+t6-6)-2] + A[1][11 +1][(-8*t1+t6-6)-1] + A[1][11 +1][(-8*t1+t6-6)] + A[1][11 +1][(-8*t1+t6-6)+1] + A[1][11 +1][(-8*t1+t6-6)+2] + A[1][11 +2][(-8*t1+t6-6)-2] + A[1][11 +2][(-8*t1+t6-6)-1] + A[1][11 +2][(-8*t1+t6-6)] + A[1][11 +2][(-8*t1+t6-6)+1] + A[1][11 +2][(-8*t1+t6-6)+2] );; } } } } if ((N >= 7) && (N <= 13) && (t1 == 2*t2-1) && (t1 == 64*t3-1) && (t1 >= 63) && (t1 <= floord(T-2,2))) { for (t5=8*t1+8;t5<=8*t1+N+1;t5++) { for (t6=8*t1+8;t6<=8*t1+N+1;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + 
A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } for (t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=8*t1+8;t6<=8*t1+N+3;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } } if ((t1 <= min(floord(T-2,2),64*t3-1)) && (t1 >= max(0,ceild(512*t3-N+518,8))) && (8*t1 == 16*t2-N+6)) { for (t5=8*t1+N-6;t5<=8*t1+N+1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + 
A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=512*t3;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-4)][(-8*t1+t6-6)] = 0.04*( A[1][(N-4)-2][(-8*t1+t6-6)-2] + A[1][(N-4)-2][(-8*t1+t6-6)-1] + A[1][(N-4)-2][(-8*t1+t6-6)] + A[1][(N-4)-2][(-8*t1+t6-6)+1] + A[1][(N-4)-2][(-8*t1+t6-6)+2] + A[1][(N-4)-1][(-8*t1+t6-6)-2] + 
A[1][(N-4)-1][(-8*t1+t6-6)-1] + A[1][(N-4)-1][(-8*t1+t6-6)] + A[1][(N-4)-1][(-8*t1+t6-6)+1] + A[1][(N-4)-1][(-8*t1+t6-6)+2] + A[1][(N-4) ][(-8*t1+t6-6)-2] + A[1][(N-4) ][(-8*t1+t6-6)-1] + A[1][(N-4) ][(-8*t1+t6-6)] + A[1][(N-4) ][(-8*t1+t6-6)+1] + A[1][(N-4) ][(-8*t1+t6-6)+2] + A[1][(N-4)+1][(-8*t1+t6-6)-2] + A[1][(N-4)+1][(-8*t1+t6-6)-1] + A[1][(N-4)+1][(-8*t1+t6-6)] + A[1][(N-4)+1][(-8*t1+t6-6)+1] + A[1][(N-4)+1][(-8*t1+t6-6)+2] + A[1][(N-4)+2][(-8*t1+t6-6)-2] + A[1][(N-4)+2][(-8*t1+t6-6)-1] + A[1][(N-4)+2][(-8*t1+t6-6)] + A[1][(N-4)+2][(-8*t1+t6-6)+1] + A[1][(N-4)+2][(-8*t1+t6-6)+2] );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-3)][(-8*t1+t6-6)] = 0.04*( A[1][(N-3)-2][(-8*t1+t6-6)-2] + A[1][(N-3)-2][(-8*t1+t6-6)-1] + A[1][(N-3)-2][(-8*t1+t6-6)] + A[1][(N-3)-2][(-8*t1+t6-6)+1] + A[1][(N-3)-2][(-8*t1+t6-6)+2] + A[1][(N-3)-1][(-8*t1+t6-6)-2] + A[1][(N-3)-1][(-8*t1+t6-6)-1] + A[1][(N-3)-1][(-8*t1+t6-6)] + A[1][(N-3)-1][(-8*t1+t6-6)+1] + A[1][(N-3)-1][(-8*t1+t6-6)+2] + A[1][(N-3) ][(-8*t1+t6-6)-2] + A[1][(N-3) ][(-8*t1+t6-6)-1] + A[1][(N-3) ][(-8*t1+t6-6)] + A[1][(N-3) ][(-8*t1+t6-6)+1] + A[1][(N-3) ][(-8*t1+t6-6)+2] + A[1][(N-3)+1][(-8*t1+t6-6)-2] + A[1][(N-3)+1][(-8*t1+t6-6)-1] + A[1][(N-3)+1][(-8*t1+t6-6)] + A[1][(N-3)+1][(-8*t1+t6-6)+1] + A[1][(N-3)+1][(-8*t1+t6-6)+2] + A[1][(N-3)+2][(-8*t1+t6-6)-2] + A[1][(N-3)+2][(-8*t1+t6-6)-1] + A[1][(N-3)+2][(-8*t1+t6-6)] + A[1][(N-3)+2][(-8*t1+t6-6)+1] + A[1][(N-3)+2][(-8*t1+t6-6)+2] );; } } } if ((t1 <= min(min(floord(T-2,2),floord(16*t2-N+5,8)),64*t3-1)) && (t1 >= max(max(0,ceild(16*t2-N-1,8)),ceild(512*t3-N+510,8)))) { for (t5=16*t2;t5<=8*t1+N+1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + 
A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } if ((N >= 18) && (t1 == 2*t2-1) && (t1 <= min(floord(T-3,2),floord(512*t3-N+505,8))) && (t1 >= 64*t3-1)) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + 
A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 -2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + A[0][4 +1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+23;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for 
(t6=8*t1+12;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + 
A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((t1 == 2*t2-1) && (t1 <= min(floord(T-3,2),64*t3+61)) && (t1 >= max(ceild(512*t3-N+506,8),64*t3-1))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) 
][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=8*t1+10;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 -2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + A[0][4 +1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+23;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + 
A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) 
][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } for (t4=max(ceild(512*t3-N+3,4),2*t1+2);t4<=min(min(min(min(floord(16*t2-N+17,4),floord(512*t3-N+513,4)),floord(16*t1-16*t2+N+8,4)),T-1),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-3;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + 
A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=4*t4+N-2;t5<=min(16*t2+15,4*t4+N-1);t5++) { for (t6=512*t3;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for 
(t4=max(max(max(ceild(16*t2-N+18,4),ceild(512*t3-N+3,4)),2*t1+2),4*t1-4*t2+5);t4<=min(min(min(floord(512*t3-N+513,4),T-1),2*t1+3),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + 
A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + 
A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(ceild(512*t3-N+514,4),2*t1+2);t4<=min(min(min(floord(16*t2-N+17,4),floord(16*t1-16*t2+N+8,4)),T-1),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-3;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) 
][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=4*t4+N-2;t5<=min(16*t2+15,4*t4+N-1);t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) 
][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(max(ceild(16*t2-N+18,4),ceild(512*t3-N+514,4)),2*t1+2),4*t1-4*t2+5);t4<=min(min(T-1,2*t1+3),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + 
A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(ceild(512*t3-N+514,4),ceild(16*t1-16*t2+N+9,4));t4<=min(min(min(floord(16*t1-16*t2+N+10,4),T-1),2*t1+3),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=4*t4+N-3;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } if ((N >= 22) && (t1 <= min(floord(T-2,2),floord(512*t3-N+502,8))) && (t1 >= 64*t3) && (8*t1 == 16*t2-N+6)) { for 
(t5=8*t1+N-6;t5<=8*t1+N+1;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=8*t1+N+1;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + 
A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + 
A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+8;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-4)][(-8*t1+t6-6)] = 0.04*( A[1][(N-4)-2][(-8*t1+t6-6)-2] + A[1][(N-4)-2][(-8*t1+t6-6)-1] + A[1][(N-4)-2][(-8*t1+t6-6)] + A[1][(N-4)-2][(-8*t1+t6-6)+1] + A[1][(N-4)-2][(-8*t1+t6-6)+2] + A[1][(N-4)-1][(-8*t1+t6-6)-2] + A[1][(N-4)-1][(-8*t1+t6-6)-1] + A[1][(N-4)-1][(-8*t1+t6-6)] + A[1][(N-4)-1][(-8*t1+t6-6)+1] + A[1][(N-4)-1][(-8*t1+t6-6)+2] + A[1][(N-4) ][(-8*t1+t6-6)-2] + A[1][(N-4) ][(-8*t1+t6-6)-1] + A[1][(N-4) ][(-8*t1+t6-6)] + A[1][(N-4) ][(-8*t1+t6-6)+1] + A[1][(N-4) ][(-8*t1+t6-6)+2] + A[1][(N-4)+1][(-8*t1+t6-6)-2] + A[1][(N-4)+1][(-8*t1+t6-6)-1] + A[1][(N-4)+1][(-8*t1+t6-6)] + A[1][(N-4)+1][(-8*t1+t6-6)+1] + A[1][(N-4)+1][(-8*t1+t6-6)+2] + A[1][(N-4)+2][(-8*t1+t6-6)-2] + A[1][(N-4)+2][(-8*t1+t6-6)-1] + A[1][(N-4)+2][(-8*t1+t6-6)] + A[1][(N-4)+2][(-8*t1+t6-6)+1] + A[1][(N-4)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+8;t6<=8*t1+N+3;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-3)][(-8*t1+t6-6)] = 0.04*( A[1][(N-3)-2][(-8*t1+t6-6)-2] + A[1][(N-3)-2][(-8*t1+t6-6)-1] + A[1][(N-3)-2][(-8*t1+t6-6)] + A[1][(N-3)-2][(-8*t1+t6-6)+1] + A[1][(N-3)-2][(-8*t1+t6-6)+2] + A[1][(N-3)-1][(-8*t1+t6-6)-2] + A[1][(N-3)-1][(-8*t1+t6-6)-1] + A[1][(N-3)-1][(-8*t1+t6-6)] + A[1][(N-3)-1][(-8*t1+t6-6)+1] + A[1][(N-3)-1][(-8*t1+t6-6)+2] + A[1][(N-3) ][(-8*t1+t6-6)-2] + A[1][(N-3) ][(-8*t1+t6-6)-1] + A[1][(N-3) ][(-8*t1+t6-6)] + A[1][(N-3) ][(-8*t1+t6-6)+1] + A[1][(N-3) ][(-8*t1+t6-6)+2] + A[1][(N-3)+1][(-8*t1+t6-6)-2] + A[1][(N-3)+1][(-8*t1+t6-6)-1] + A[1][(N-3)+1][(-8*t1+t6-6)] + A[1][(N-3)+1][(-8*t1+t6-6)+1] + A[1][(N-3)+1][(-8*t1+t6-6)+2] + A[1][(N-3)+2][(-8*t1+t6-6)-2] + A[1][(N-3)+2][(-8*t1+t6-6)-1] + 
A[1][(N-3)+2][(-8*t1+t6-6)] + A[1][(N-3)+2][(-8*t1+t6-6)+1] + A[1][(N-3)+2][(-8*t1+t6-6)+2] );; } } } if ((t1 <= min(min(min(floord(T-2,2),floord(16*t2-N+5,8)),floord(512*t3-N+509,8)),2*t2-2)) && (t1 >= max(ceild(16*t2-N-1,8),64*t3))) { for (t5=16*t2;t5<=8*t1+N+1;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } for (t6=8*t1+8;t6<=8*t1+N+1;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + 
A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) 
][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=8*t1+8;t6<=8*t1+N+3;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } if ((N == 14) && (t1 == 2*t2-1) && (t1 <= min(floord(T-2,2),64*t3+61)) && (t1 >= 64*t3+1)) { for (t5=8*t1+8;t5<=8*t1+15;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + 
A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=8*t1+15;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+16;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+8;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { A[0][10][(-8*t1+t6-6)] = 
0.04*( A[1][10 -2][(-8*t1+t6-6)-2] + A[1][10 -2][(-8*t1+t6-6)-1] + A[1][10 -2][(-8*t1+t6-6)] + A[1][10 -2][(-8*t1+t6-6)+1] + A[1][10 -2][(-8*t1+t6-6)+2] + A[1][10 -1][(-8*t1+t6-6)-2] + A[1][10 -1][(-8*t1+t6-6)-1] + A[1][10 -1][(-8*t1+t6-6)] + A[1][10 -1][(-8*t1+t6-6)+1] + A[1][10 -1][(-8*t1+t6-6)+2] + A[1][10 ][(-8*t1+t6-6)-2] + A[1][10 ][(-8*t1+t6-6)-1] + A[1][10 ][(-8*t1+t6-6)] + A[1][10 ][(-8*t1+t6-6)+1] + A[1][10 ][(-8*t1+t6-6)+2] + A[1][10 +1][(-8*t1+t6-6)-2] + A[1][10 +1][(-8*t1+t6-6)-1] + A[1][10 +1][(-8*t1+t6-6)] + A[1][10 +1][(-8*t1+t6-6)+1] + A[1][10 +1][(-8*t1+t6-6)+2] + A[1][10 +2][(-8*t1+t6-6)-2] + A[1][10 +2][(-8*t1+t6-6)-1] + A[1][10 +2][(-8*t1+t6-6)] + A[1][10 +2][(-8*t1+t6-6)+1] + A[1][10 +2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+8;t6<=8*t1+17;t6++) { if ((t1+1)%2 == 0) { A[0][11][(-8*t1+t6-6)] = 0.04*( A[1][11 -2][(-8*t1+t6-6)-2] + A[1][11 -2][(-8*t1+t6-6)-1] + A[1][11 -2][(-8*t1+t6-6)] + A[1][11 -2][(-8*t1+t6-6)+1] + A[1][11 -2][(-8*t1+t6-6)+2] + A[1][11 -1][(-8*t1+t6-6)-2] + A[1][11 -1][(-8*t1+t6-6)-1] + A[1][11 -1][(-8*t1+t6-6)] + A[1][11 -1][(-8*t1+t6-6)+1] + A[1][11 -1][(-8*t1+t6-6)+2] + A[1][11 ][(-8*t1+t6-6)-2] + A[1][11 ][(-8*t1+t6-6)-1] + A[1][11 ][(-8*t1+t6-6)] + A[1][11 ][(-8*t1+t6-6)+1] + A[1][11 ][(-8*t1+t6-6)+2] + A[1][11 +1][(-8*t1+t6-6)-2] + A[1][11 +1][(-8*t1+t6-6)-1] + A[1][11 +1][(-8*t1+t6-6)] + A[1][11 +1][(-8*t1+t6-6)+1] + A[1][11 +1][(-8*t1+t6-6)+2] + A[1][11 +2][(-8*t1+t6-6)-2] + A[1][11 +2][(-8*t1+t6-6)-1] + A[1][11 +2][(-8*t1+t6-6)] + A[1][11 +2][(-8*t1+t6-6)+1] + A[1][11 +2][(-8*t1+t6-6)+2] );; } } } if ((N >= 7) && (N <= 13) && (t1 == 2*t2-1) && (t1 <= min(floord(T-2,2),floord(512*t3-N+509,8))) && (t1 >= 64*t3+1)) { for (t5=8*t1+8;t5<=8*t1+N+1;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + 
A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=8*t1+N+1;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + 
A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+N+2;t6<=8*t1+N+3;t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for 
(t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=8*t1+8;t6<=8*t1+N+3;t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } } if ((t1 <= min(floord(T-2,2),64*t3+62)) && (t1 >= max(ceild(512*t3-N+518,8),64*t3)) && (8*t1 == 16*t2-N+6)) { for (t5=8*t1+N-6;t5<=8*t1+N+1;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + 
A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } } for (t6=8*t1+8;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } if ((8*t1+15*N+6)%16 == 0) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + 
A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } for (t6=8*t1+8;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-4)][(-8*t1+t6-6)] = 0.04*( A[1][(N-4)-2][(-8*t1+t6-6)-2] + A[1][(N-4)-2][(-8*t1+t6-6)-1] + A[1][(N-4)-2][(-8*t1+t6-6)] + A[1][(N-4)-2][(-8*t1+t6-6)+1] + A[1][(N-4)-2][(-8*t1+t6-6)+2] + A[1][(N-4)-1][(-8*t1+t6-6)-2] + A[1][(N-4)-1][(-8*t1+t6-6)-1] + A[1][(N-4)-1][(-8*t1+t6-6)] + A[1][(N-4)-1][(-8*t1+t6-6)+1] + A[1][(N-4)-1][(-8*t1+t6-6)+2] + A[1][(N-4) ][(-8*t1+t6-6)-2] + A[1][(N-4) ][(-8*t1+t6-6)-1] + A[1][(N-4) ][(-8*t1+t6-6)] + A[1][(N-4) ][(-8*t1+t6-6)+1] + A[1][(N-4) ][(-8*t1+t6-6)+2] + A[1][(N-4)+1][(-8*t1+t6-6)-2] + A[1][(N-4)+1][(-8*t1+t6-6)-1] + A[1][(N-4)+1][(-8*t1+t6-6)] + A[1][(N-4)+1][(-8*t1+t6-6)+1] + A[1][(N-4)+1][(-8*t1+t6-6)+2] + A[1][(N-4)+2][(-8*t1+t6-6)-2] + A[1][(N-4)+2][(-8*t1+t6-6)-1] + A[1][(N-4)+2][(-8*t1+t6-6)] + A[1][(N-4)+2][(-8*t1+t6-6)+1] + A[1][(N-4)+2][(-8*t1+t6-6)+2] );; } } for (t6=8*t1+8;t6<=512*t3+511;t6++) { if ((8*t1+15*N+6)%16 == 0) { A[0][(N-3)][(-8*t1+t6-6)] = 0.04*( A[1][(N-3)-2][(-8*t1+t6-6)-2] + A[1][(N-3)-2][(-8*t1+t6-6)-1] + A[1][(N-3)-2][(-8*t1+t6-6)] + A[1][(N-3)-2][(-8*t1+t6-6)+1] + A[1][(N-3)-2][(-8*t1+t6-6)+2] + A[1][(N-3)-1][(-8*t1+t6-6)-2] + A[1][(N-3)-1][(-8*t1+t6-6)-1] + A[1][(N-3)-1][(-8*t1+t6-6)] + A[1][(N-3)-1][(-8*t1+t6-6)+1] + A[1][(N-3)-1][(-8*t1+t6-6)+2] + A[1][(N-3) ][(-8*t1+t6-6)-2] + A[1][(N-3) ][(-8*t1+t6-6)-1] + A[1][(N-3) ][(-8*t1+t6-6)] + A[1][(N-3) ][(-8*t1+t6-6)+1] + A[1][(N-3) ][(-8*t1+t6-6)+2] + A[1][(N-3)+1][(-8*t1+t6-6)-2] + A[1][(N-3)+1][(-8*t1+t6-6)-1] + 
A[1][(N-3)+1][(-8*t1+t6-6)] + A[1][(N-3)+1][(-8*t1+t6-6)+1] + A[1][(N-3)+1][(-8*t1+t6-6)+2] + A[1][(N-3)+2][(-8*t1+t6-6)-2] + A[1][(N-3)+2][(-8*t1+t6-6)-1] + A[1][(N-3)+2][(-8*t1+t6-6)] + A[1][(N-3)+2][(-8*t1+t6-6)+1] + A[1][(N-3)+2][(-8*t1+t6-6)+2] );; } } } if ((t1 <= min(min(floord(T-2,2),floord(16*t2-N+5,8)),64*t3+62)) && (t1 >= max(max(ceild(16*t2-N-1,8),ceild(512*t3-N+510,8)),64*t3))) { for (t5=16*t2;t5<=8*t1+N+1;t5++) { for (t6=8*t1+6;t6<=8*t1+7;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; } for (t6=8*t1+8;t6<=512*t3+511;t6++) { A[1][(-8*t1+t5-4)][(-8*t1+t6-4)] = 0.04*( A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-2][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)-1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4) 
][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4) ][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+1][(-8*t1+t6-4)+2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-2] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)-1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+1] + A[0][(-8*t1+t5-4)+2][(-8*t1+t6-4)+2] );; A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } for (t5=8*t1+N+2;t5<=8*t1+N+3;t5++) { for (t6=8*t1+8;t6<=512*t3+511;t6++) { A[0][(-8*t1+t5-6)][(-8*t1+t6-6)] = 0.04*( A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)-2][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+1] + 
A[1][(-8*t1+t5-6)-1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6) ][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+1][(-8*t1+t6-6)+2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-2] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)-1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+1] + A[1][(-8*t1+t5-6)+2][(-8*t1+t6-6)+2] );; } } } if ((N >= 8) && (N <= 17) && (t1 == 2*t2-1) && (t1 <= min(floord(T-3,2),64*t3+61)) && (t1 >= 64*t3-1)) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=8*t1+10;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 
-2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + A[0][4 +1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+N+5;t5++) { for (t6=8*t1+10;t6<=8*t1+11;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } for (t6=8*t1+12;t6<=8*t1+N+5;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] 
+ A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } for (t6=8*t1+N+6;t6<=min(512*t3+511,8*t1+N+7);t6++) { if ((t1+1)%2 == 0) { 
A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } for (t5=8*t1+N+6;t5<=min(8*t1+23,8*t1+N+7);t5++) { for (t6=8*t1+12;t6<=min(512*t3+511,8*t1+N+7);t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + 
A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } if ((N == 7) && (t1 == 2*t2-1) && (t1 <= min(floord(T-3,2),64*t3+61))) { for (t5=8*t1+10;t5<=8*t1+11;t5++) { for (t6=8*t1+10;t6<=8*t1+12;t6++) { if ((t1+1)%2 == 0) { A[1][(-8*t1+t5-8)][(-8*t1+t6-8)] = 0.04*( A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-2][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)-1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8) ][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+1][(-8*t1+t6-8)+2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-2] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)-1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+1] + A[0][(-8*t1+t5-8)+2][(-8*t1+t6-8)+2] );; } } } for (t6=8*t1+10;t6<=8*t1+12;t6++) { if ((t1+1)%2 == 0) { A[1][4][(-8*t1+t6-8)] = 0.04*( A[0][4 -2][(-8*t1+t6-8)-2] + A[0][4 -2][(-8*t1+t6-8)-1] + A[0][4 -2][(-8*t1+t6-8)] + A[0][4 -2][(-8*t1+t6-8)+1] + A[0][4 -2][(-8*t1+t6-8)+2] + A[0][4 -1][(-8*t1+t6-8)-2] + A[0][4 -1][(-8*t1+t6-8)-1] + A[0][4 -1][(-8*t1+t6-8)] + A[0][4 -1][(-8*t1+t6-8)+1] + A[0][4 -1][(-8*t1+t6-8)+2] + A[0][4 ][(-8*t1+t6-8)-2] + A[0][4 ][(-8*t1+t6-8)-1] + A[0][4 ][(-8*t1+t6-8)] + A[0][4 ][(-8*t1+t6-8)+1] + A[0][4 ][(-8*t1+t6-8)+2] + A[0][4 +1][(-8*t1+t6-8)-2] + A[0][4 +1][(-8*t1+t6-8)-1] + 
A[0][4 +1][(-8*t1+t6-8)] + A[0][4 +1][(-8*t1+t6-8)+1] + A[0][4 +1][(-8*t1+t6-8)+2] + A[0][4 +2][(-8*t1+t6-8)-2] + A[0][4 +2][(-8*t1+t6-8)-1] + A[0][4 +2][(-8*t1+t6-8)] + A[0][4 +2][(-8*t1+t6-8)+1] + A[0][4 +2][(-8*t1+t6-8)+2] );; } } for (t5=8*t1+13;t5<=8*t1+14;t5++) { for (t6=8*t1+12;t6<=8*t1+14;t6++) { if ((t1+1)%2 == 0) { A[0][(-8*t1+t5-10)][(-8*t1+t6-10)] = 0.04*( A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-2][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)-1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10) ][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+1][(-8*t1+t6-10)+2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-2] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)-1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+1] + A[1][(-8*t1+t5-10)+2][(-8*t1+t6-10)+2] );; } } } } for (t4=max(max(128*t3,2*t1+2),4*t1-4*t2+5);t4<=min(min(min(floord(16*t2-N+17,4),floord(512*t3-N+513,4)),floord(16*t1-16*t2+N+8,4)),T-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=4*t4+2;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + 
A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-3;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + 
A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) 
][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=4*t4+N-2;t5<=min(16*t2+15,4*t4+N-1);t5++) { for (t6=4*t4+4;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(max(ceild(16*t2-N+18,4),128*t3),2*t1+2),4*t1-4*t2+5);t4<=min(min(floord(512*t3-N+513,4),T-1),2*t1+3);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=4*t4+2;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + 
A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=16*t2+15;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } for (t6=4*t4+N-2;t6<=min(512*t3+511,4*t4+N-1);t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + 
A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=ceild(16*t1-16*t2+N+9,4);t4<=min(min(min(min(floord(512*t3-N+513,4),floord(16*t1-16*t2+N+10,4)),T-1),2*t1+3),128*t3-1);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=4*t4+N-3;t5++) { for (t6=512*t3;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for 
(t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-1;t5++) { for (t6=512*t3;t6<=4*t4+N-1;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(ceild(512*t3-N+514,4),128*t3),2*t1+2);t4<=min(min(min(floord(16*t2-N+17,4),floord(16*t1-16*t2+N+8,4)),T-1),128*t3+126);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=4*t4+2;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + 
A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-3;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + 
A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } for (t5=4*t4+N-2;t5<=min(16*t2+15,4*t4+N-1);t5++) { for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + 
A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(max(max(ceild(16*t2-N+18,4),ceild(512*t3-N+514,4)),128*t3),2*t1+2),4*t1-4*t2+5);t4<=min(min(T-1,2*t1+3),128*t3+126);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=-16*t1+16*t2+8*t4-12;t5++) { for (t6=4*t4+2;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=16*t2+15;t5++) { for (t6=4*t4+2;t6<=4*t4+3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + 
A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) 
][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(ceild(512*t3-N+514,4),ceild(16*t1-16*t2+N+9,4)),128*t3);t4<=min(min(min(floord(16*t1-16*t2+N+10,4),T-1),2*t1+3),128*t3+126);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=4*t4+N-3;t5++) { for (t6=4*t4+2;t6<=512*t3+511;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-1;t5++) { for (t6=4*t4+4;t6<=512*t3+511;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + 
A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=max(max(ceild(16*t1-16*t2+N+9,4),128*t3),4*t1-4*t2+5);t4<=min(min(min(floord(512*t3-N+513,4),floord(16*t1-16*t2+N+10,4)),T-1),2*t1+3);t4++) { for (t5=-16*t1+16*t2+8*t4-15;t5<=4*t4+N-3;t5++) { for (t6=4*t4+2;t6<=4*t4+N-3;t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } for (t5=-16*t1+16*t2+8*t4-11;t5<=4*t4+N-1;t5++) { for (t6=4*t4+4;t6<=4*t4+N-1;t6++) { A[0][(-4*t4+t5-2)][(-4*t4+t6-2)] = 0.04*( A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-2] + 
A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-2][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)-1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2) ][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+1][(-4*t4+t6-2)+2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-2] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)-1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+1] + A[1][(-4*t4+t5-2)+2][(-4*t4+t6-2)+2] );; } } } for (t4=ceild(16*t1-16*t2+N+11,4);t4<=min(min(min(floord(16*t1-16*t2+N+12,4),T-1),2*t1+3),128*t3+126);t4++) { for (t5=max(4*t4+2,-16*t1+16*t2+8*t4-15);t5<=4*t4+N-3;t5++) { for (t6=max(512*t3,4*t4+2);t6<=min(512*t3+511,4*t4+N-3);t6++) { A[1][(-4*t4+t5)][(-4*t4+t6)] = 0.04*( A[0][(-4*t4+t5)-2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-2][(-4*t4+t6)+2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)-1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)-1][(-4*t4+t6)+2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-2] + A[0][(-4*t4+t5) ][(-4*t4+t6)-1] + A[0][(-4*t4+t5) ][(-4*t4+t6)] + A[0][(-4*t4+t5) ][(-4*t4+t6)+1] + A[0][(-4*t4+t5) ][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+1][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+1][(-4*t4+t6)+2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-2] + A[0][(-4*t4+t5)+2][(-4*t4+t6)-1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)] + 
A[0][(-4*t4+t5)+2][(-4*t4+t6)+1] + A[0][(-4*t4+t5)+2][(-4*t4+t6)+2] );; } } } if ((t1 == 2*t2) && (t1 <= min(floord(T-4,2),64*t3+60))) { for (t5=8*t1+14;t5<=min(8*t1+15,8*t1+N+9);t5++) { for (t6=max(512*t3,8*t1+14);t6<=min(512*t3+511,8*t1+N+9);t6++) { if (t1%2 == 0) { A[1][(-8*t1+t5-12)][(-8*t1+t6-12)] = 0.04*( A[0][(-8*t1+t5-12)-2][(-8*t1+t6-12)-2] + A[0][(-8*t1+t5-12)-2][(-8*t1+t6-12)-1] + A[0][(-8*t1+t5-12)-2][(-8*t1+t6-12)] + A[0][(-8*t1+t5-12)-2][(-8*t1+t6-12)+1] + A[0][(-8*t1+t5-12)-2][(-8*t1+t6-12)+2] + A[0][(-8*t1+t5-12)-1][(-8*t1+t6-12)-2] + A[0][(-8*t1+t5-12)-1][(-8*t1+t6-12)-1] + A[0][(-8*t1+t5-12)-1][(-8*t1+t6-12)] + A[0][(-8*t1+t5-12)-1][(-8*t1+t6-12)+1] + A[0][(-8*t1+t5-12)-1][(-8*t1+t6-12)+2] + A[0][(-8*t1+t5-12) ][(-8*t1+t6-12)-2] + A[0][(-8*t1+t5-12) ][(-8*t1+t6-12)-1] + A[0][(-8*t1+t5-12) ][(-8*t1+t6-12)] + A[0][(-8*t1+t5-12) ][(-8*t1+t6-12)+1] + A[0][(-8*t1+t5-12) ][(-8*t1+t6-12)+2] + A[0][(-8*t1+t5-12)+1][(-8*t1+t6-12)-2] + A[0][(-8*t1+t5-12)+1][(-8*t1+t6-12)-1] + A[0][(-8*t1+t5-12)+1][(-8*t1+t6-12)] + A[0][(-8*t1+t5-12)+1][(-8*t1+t6-12)+1] + A[0][(-8*t1+t5-12)+1][(-8*t1+t6-12)+2] + A[0][(-8*t1+t5-12)+2][(-8*t1+t6-12)-2] + A[0][(-8*t1+t5-12)+2][(-8*t1+t6-12)-1] + A[0][(-8*t1+t5-12)+2][(-8*t1+t6-12)] + A[0][(-8*t1+t5-12)+2][(-8*t1+t6-12)+1] + A[0][(-8*t1+t5-12)+2][(-8*t1+t6-12)+2] );; } } } } if ((t1 >= max(ceild(16*t2+512*t3-N+496,16),64*t3+62)) && (t3 <= floord(T-128,128))) { for (t5=max(max(16*t2,512*t3+510),-16*t1+16*t2+1024*t3+1001);t5<=min(min(16*t2+15,512*t3+N+505),-16*t1+16*t2+1024*t3+1016);t5++) { for (t6=512*t3+510;t6<=min(512*t3+511,512*t3+N+505);t6++) { A[1][(-512*t3+t5-508)][(-512*t3+t6-508)] = 0.04*( A[0][(-512*t3+t5-508)-2][(-512*t3+t6-508)-2] + A[0][(-512*t3+t5-508)-2][(-512*t3+t6-508)-1] + A[0][(-512*t3+t5-508)-2][(-512*t3+t6-508)] + A[0][(-512*t3+t5-508)-2][(-512*t3+t6-508)+1] + A[0][(-512*t3+t5-508)-2][(-512*t3+t6-508)+2] + A[0][(-512*t3+t5-508)-1][(-512*t3+t6-508)-2] + A[0][(-512*t3+t5-508)-1][(-512*t3+t6-508)-1] + 
A[0][(-512*t3+t5-508)-1][(-512*t3+t6-508)] + A[0][(-512*t3+t5-508)-1][(-512*t3+t6-508)+1] + A[0][(-512*t3+t5-508)-1][(-512*t3+t6-508)+2] + A[0][(-512*t3+t5-508) ][(-512*t3+t6-508)-2] + A[0][(-512*t3+t5-508) ][(-512*t3+t6-508)-1] + A[0][(-512*t3+t5-508) ][(-512*t3+t6-508)] + A[0][(-512*t3+t5-508) ][(-512*t3+t6-508)+1] + A[0][(-512*t3+t5-508) ][(-512*t3+t6-508)+2] + A[0][(-512*t3+t5-508)+1][(-512*t3+t6-508)-2] + A[0][(-512*t3+t5-508)+1][(-512*t3+t6-508)-1] + A[0][(-512*t3+t5-508)+1][(-512*t3+t6-508)] + A[0][(-512*t3+t5-508)+1][(-512*t3+t6-508)+1] + A[0][(-512*t3+t5-508)+1][(-512*t3+t6-508)+2] + A[0][(-512*t3+t5-508)+2][(-512*t3+t6-508)-2] + A[0][(-512*t3+t5-508)+2][(-512*t3+t6-508)-1] + A[0][(-512*t3+t5-508)+2][(-512*t3+t6-508)] + A[0][(-512*t3+t5-508)+2][(-512*t3+t6-508)+1] + A[0][(-512*t3+t5-508)+2][(-512*t3+t6-508)+2] );; } } } } } } } /* End of CLooG code */ #undef T #define T 16000 // #undef N // #define N 16000L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("|Time taken = %7.5lfs\n", tdiff ); printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { total+= A[T%2][i][j] ; } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N)); } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { chtotal += ((char *)A[T%2][i])[j]; } } printf("|sum(rep(A)) = %d\n", chtotal); #endif return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/ // /* @ end @*/
detector.c
#include "darknet.h" #include <stdio.h> #include <dirent.h> #include <unistd.h> #include <sys/stat.h> struct stat st; static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90}; void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear) { list *options = read_data_cfg(datacfg); char *train_images = option_find_str(options, "train", "data/train.list"); char *backup_directory = option_find_str(options, "backup", "/backup/"); srand(time(0)); char *base = basecfg(cfgfile); printf("%s\n", base); float avg_loss = -1; network **nets = calloc(ngpus, sizeof(network*)); srand(time(0)); int seed = rand(); int i; for(i = 0; i < ngpus; ++i){ srand(seed); #ifdef GPU if(gpu_index >= 0){ opencl_set_device(i); } #endif nets[i] = load_network(cfgfile, weightfile, clear); nets[i]->learning_rate *= ngpus; } srand(time(0)); network *net = nets[0]; int imgs = net->batch * net->subdivisions * ngpus; #ifndef BENCHMARK printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); #endif data train, buffer; layer l = net->layers[net->n - 1]; int classes = l.classes; float jitter = l.jitter; list *plist = get_paths(train_images); //int N = plist->size; char **paths = (char **)list_to_array(plist); load_args args = get_base_args(net); args.coords = l.coords; args.paths = paths; args.n = imgs; args.m = plist->size; args.classes = classes; args.jitter = jitter; args.num_boxes = l.max_boxes; args.d = &buffer; args.type = DETECTION_DATA; //args.type = INSTANCE_DATA; args.threads = 64; pthread_t load_thread = load_data(args); #ifdef LOSS_ONLY double time=what_time_is_it_now(); #else double time; #endif int count = 0; if(count == 0) { #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char 
buff[256]; sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base); save_weights(net, buff); } int max_size = ((net->w + net->h)/2); //while(i*imgs < N*120){ while(get_current_batch(net) < net->max_batches){ if(l.random && count++%10 == 0){ #if !defined(BENCHMARK) && !defined(LOSS_ONLY) printf("Resizing\n"); #endif int dim = max_size - ((rand() % 8) * 32); #ifdef BENCHMARK dim = 608; #endif if (get_current_batch(net)+200 > net->max_batches) dim = max_size; if (net->w < dim || net->h < dim) dim = max_size; #if !defined(BENCHMARK) && !defined(LOSS_ONLY) printf("%d\n", dim); #endif args.w = dim; args.h = dim; pthread_join(load_thread, 0); train = buffer; free_data(train); load_thread = load_data(args); #pragma omp parallel for for(i = 0; i < ngpus; ++i){ resize_network(nets[i], dim, dim); } net = nets[0]; } #ifndef LOSS_ONLY time=what_time_is_it_now(); #endif pthread_join(load_thread, 0); train = buffer; load_thread = load_data(args); /* int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[10] + 1 + k*5); if(!b.x) break; printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h); } */ /* int zz; for(zz = 0; zz < train.X.cols; ++zz){ image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]); int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[zz] + k*5, 1); printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); draw_bbox(im, b, 1, 1,0,0); } show_image(im, "truth11"); cvWaitKey(0); save_image(im, "truth11"); } */ #ifndef LOSS_ONLY printf("Loaded: %lf seconds\n", what_time_is_it_now()-time); #endif #ifndef LOSS_ONLY time=what_time_is_it_now(); #endif float loss = 0; #ifdef GPU if (gpu_index >= 0) { if (ngpus == 1) { loss = train_network(net, train); } else { loss = train_networks(nets, ngpus, train, 4); } } else { loss = train_network(net, train); } #else loss = train_network(net, train); #endif if (avg_loss < 0) avg_loss = loss; avg_loss = avg_loss*.9 + loss*.1; i = get_current_batch(net); #ifdef LOSS_ONLY printf("%lf\t%f\n", 
what_time_is_it_now()-time, loss); #else printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs); #endif #ifdef GPU if (loss != loss && gpu_index >= 0) { opencl_deinit(gpusg, ngpusg); } #endif if(loss != loss) { printf("NaN LOSS detected! No possible to continue!\n"); exit(-7); } if(i%100==0){ #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s.backup", backup_directory, base); save_weights(net, buff); } if(i%10000==0 || (i < 1000 && i%100 == 0)){ #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); save_weights(net, buff); } free_data(train); #ifdef GPU_STATS opencl_dump_mem_stat(); #endif #ifdef BENCHMARK break; #endif } #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s_final.weights", backup_directory, base); save_weights(net, buff); free(paths); free(plist); free(base); free(nets); free(options); } static int get_coco_image_id(char *filename) { char *p = strrchr(filename, '/'); char *c = strrchr(filename, '_'); if(c) p = c; return atoi(p+1); } static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h) { int i, j; int image_id = get_coco_image_id(image_path); for(i = 0; i < num_boxes; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; float bx = xmin; float by = ymin; float bw = xmax - xmin; float bh = ymax - ymin; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, 
\"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]); } } } void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1; float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1; float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1; float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1; if (xmin < 1) xmin = 1; if (ymin < 1) ymin = 1; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax); } } } void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ int class = j; if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax); } } } void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile) { int j; list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.list"); char *name_list = option_find_str(options, "names", "data/names.list"); char *prefix = option_find_str(options, "results", "results"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 2); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); 
srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int classes = l.classes; char buff[1024]; char *type = option_find_str(options, "eval", "voc"); FILE *fp = 0; FILE **fps = 0; int coco = 0; int imagenet = 0; if(0==strcmp(type, "coco")){ if(!outfile) outfile = "coco_results"; snprintf(buff, 1024, "%s/%s.json", prefix, outfile); fp = fopen(buff, "w"); fprintf(fp, "[\n"); coco = 1; } else if(0==strcmp(type, "imagenet")){ if(!outfile) outfile = "imagenet-detection"; snprintf(buff, 1024, "%s/%s.txt", prefix, outfile); fp = fopen(buff, "w"); imagenet = 1; classes = 200; } else { if(!outfile) outfile = "comp4_det_test_"; fps = calloc(classes, sizeof(FILE *)); for(j = 0; j < classes; ++j){ snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = calloc(nthreads, sizeof(image)); image *val_resized = calloc(nthreads, sizeof(image)); image *buf = calloc(nthreads, sizeof(image)); image *buf_resized = calloc(nthreads, sizeof(image)); pthread_t *thr = calloc(nthreads, sizeof(pthread_t)); image input = make_image(net->w, net->h, net->c*2); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = 
paths[i+t-nthreads]; char *id = basecfg(path); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1); flip_image(val_resized[t]); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1); network_predict(net, input.data); int w = val[t].w; int h = val[t].h; int num = 0; int nboxes = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (coco){ print_cocos(fp, path, dets, num, classes, w, h); } else if (imagenet){ print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h); } else { print_detector_detections(fps, id, dets, num, classes, w, h); } free_detections(dets, num); free(id); free_image(val[t]); free_image(val_resized[t]); } } for(j = 0; j < classes; ++j){ if(fps) fclose(fps[j]); } if(coco){ fseek(fp, -2, SEEK_CUR); fprintf(fp, "\n]\n"); fclose(fp); } fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start); } void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile) { int j; list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.list"); char *name_list = option_find_str(options, "names", "data/names.list"); char *prefix = option_find_str(options, "results", "results"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int classes = l.classes; char buff[1024]; char *type = option_find_str(options, 
"eval", "voc"); FILE *fp = 0; FILE **fps = 0; int coco = 0; int imagenet = 0; if(0==strcmp(type, "coco")){ if(!outfile) outfile = "coco_results"; snprintf(buff, 1024, "%s/%s.json", prefix, outfile); fp = fopen(buff, "w"); fprintf(fp, "[\n"); coco = 1; } else if(0==strcmp(type, "imagenet")){ if(!outfile) outfile = "imagenet-detection"; snprintf(buff, 1024, "%s/%s.txt", prefix, outfile); fp = fopen(buff, "w"); imagenet = 1; classes = 200; } else { if(!outfile) outfile = "comp4_det_test_"; fps = calloc(classes, sizeof(FILE *)); for(j = 0; j < classes; ++j){ snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = calloc(nthreads, sizeof(image)); image *val_resized = calloc(nthreads, sizeof(image)); image *buf = calloc(nthreads, sizeof(image)); image *buf_resized = calloc(nthreads, sizeof(image)); pthread_t *thr = calloc(nthreads, sizeof(pthread_t)); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = paths[i+t-nthreads]; char *id = basecfg(path); float *X = val_resized[t].data; network_predict(net, X); int w = val[t].w; int h = val[t].h; int nboxes = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) 
do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (coco){ print_cocos(fp, path, dets, nboxes, classes, w, h); } else if (imagenet){ print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h); } else { print_detector_detections(fps, id, dets, nboxes, classes, w, h); } free_detections(dets, nboxes); free(id); free_image(val[t]); free_image(val_resized[t]); } } for(j = 0; j < classes; ++j){ if(fps) fclose(fps[j]); } if(coco){ fseek(fp, -2, SEEK_CUR); fprintf(fp, "\n]\n"); fclose(fp); } fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start); } void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile) { network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); //list *plist = get_paths("data/coco_val_5k.list"); list *options = read_data_cfg(datacfg); char *test_images = option_find_str(options, "test", "data/test.list"); list *plist = get_paths(test_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", 
labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) { list *options = read_data_cfg(datacfg); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; while(1){ if(filename){ strncpy(input, filename, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n"); } image im = load_image_color(input,0,0); int resize = im.w != net->w || im.h != net->h; image sized = resize ? 
letterbox_image(im, net->w, net->h) : im; //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; float *X = sized.data; time=what_time_is_it_now(); if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { network_predict(net, X); } if (l.type == YOLO4) { network_predict_y4(net, X); } printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); int nboxes = 0; detection *dets = 0; if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); } if (l.type == YOLO4) { dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); } //printf("%d\n", nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); } if (l.type == YOLO4) { draw_detections_v3(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); } free_detections(dets, nboxes); if(outfile){ save_image(im, outfile); } else{ save_image(im, "predictions"); #ifdef OPENCV show_image(im, "predictions", 0); #endif } free_image(im); if (resize) free_image(sized); if (filename) break; } } int exists(const char *fname, const char* ext) { FILE *file; if (strstr(fname, ext) && (file = fopen(fname, "r"))) { fclose(file); return 1; } return 0; } int empty(char *dirname) { int n = 0; struct dirent *d; DIR *dir = opendir(dirname); if (dir == NULL) // not a dir or doesn't exist return 1; while ((d = readdir(dir)) != NULL) { if(++n > 2) break; } closedir(dir); if (n <= 2) //dir empty return 1; else return 0; } void test_ddetector(char *datacfg, 
char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir) { list *options = read_data_cfg(datacfg); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; char fname[256]; char ffname[1024]; char ffoname[1024]; struct dirent *de = NULL; while(1) { while (empty(in_dir)) { usleep(100); } DIR *dr = opendir(in_dir); while ((de = readdir(dr)) != NULL) { printf("%s\n", de->d_name); strcpy(fname, de->d_name); strcpy(ffname, in_dir); strcat(ffname, "/"); strcat(ffname, fname); if (!exists(ffname, ".jpg")) continue; if (1) { strcpy(ffoname, out_dir); strcat(ffoname, "/"); strcat(ffoname, fname); int len = strlen(ffoname) - 4; ffoname[len] = '\0'; strncpy(input, ffname, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if (!input) continue; strtok(input, "\n"); } off_t size = 0; off_t offs = 0; do { offs = size; stat(input, &st); size = st.st_size; if (offs != size) usleep(10); else break; } while (1); image im = load_image_color(input, 0, 0); int resize = im.w != net->w || im.h != net->h; image sized = resize ? 
letterbox_image(im, net->w, net->h) : im; //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n - 1]; float *X = sized.data; time = what_time_is_it_now(); network_predict(net, X); printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time); int nboxes = 0; detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); //printf("%d\n", nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); free_detections(dets, nboxes); free_image(im); if (resize) free_image(sized); // if (filename) break; remove(input); } closedir(dr); } } /* void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; float nms = .45; while(1){ image in = get_image_from_stream_cv(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; float *X = in_s.data; network_predict(net, X); int nboxes = 
0; detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int left = b.x-b.w/2.; int top = b.y-b.h/2.; censor_image(in, left, top, b.w, b.h); } } show_image(in, base); cvWaitKey(10); free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream_cv(cap); free_image(in); } } #endif } void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; int count = 0; float nms = .45; while(1){ image in = get_image_from_stream_cv(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; show_image(in, base); int nboxes = 0; float *X = in_s.data; network_predict(net, X); detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, 
nms, l.nms_kind, l.beta_nms); } for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h; int dx = b.x*in.w-size/2.; int dy = b.y*in.h-size/2.; image bim = crop_image(in, dx, dy, size, size); char buff[2048]; sprintf(buff, "results/extract/%07d", count); ++count; save_image(bim, buff); free_image(bim); } } free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream_cv(cap); free_image(in); } } #endif } */ /* void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets) { network_predict_image(net, im); layer l = net->layers[net->n-1]; int nboxes = num_boxes(net); fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } } */ void run_detector(int argc, char **argv) { char *prefix = find_char_arg(argc, argv, "-prefix", 0); float thresh = find_float_arg(argc, argv, "-thresh", .5); float hier_thresh = find_float_arg(argc, argv, "-hier", .5); int cam_index = find_int_arg(argc, argv, "-c", 0); int frame_skip = find_int_arg(argc, argv, "-s", 0); int avg = find_int_arg(argc, argv, "-avg", 3); if(argc < 4){ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); return; } char *gpu_list = find_char_arg(argc, argv, "-gpus", 0); char *outfile = find_char_arg(argc, argv, "-out", 0); int *gpus = 0; int gpu = 0; int ngpus = 0; if(gpu_list){ printf("%s\n", gpu_list); int len = strlen(gpu_list); ngpus = 1; int i; for(i = 0; i < len; ++i){ if (gpu_list[i] == ',') ++ngpus; } gpus = calloc(ngpus, sizeof(int)); for(i = 0; i < ngpus; ++i){ gpus[i] = atoi(gpu_list); gpu_list = strchr(gpu_list, ',')+1; } } else { gpu = gpu_index; gpus = &gpu; ngpus 
= 1; } int clear = find_arg(argc, argv, "-clear"); int fullscreen = find_arg(argc, argv, "-fullscreen"); int width = find_int_arg(argc, argv, "-w", 0); int height = find_int_arg(argc, argv, "-h", 0); int fps = find_int_arg(argc, argv, "-fps", 0); //int class = find_int_arg(argc, argv, "-class", 0); char *datacfg = argv[3]; char *cfg = argv[4]; char *weights = (argc > 5) ? argv[5] : 0; char *filename = (argc > 6) ? argv[6]: 0; if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights); else if(0==strcmp(argv[2], "demo")) { list *options = read_data_cfg(datacfg); int classes = option_find_int(options, "classes", 20); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen); } //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); }
libperf.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017-2020. ALL RIGHTS RESERVED. * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <ucs/debug/log.h> #include <ucs/arch/bitops.h> #include <ucs/sys/module.h> #include <ucs/sys/string.h> #include <string.h> #include <tools/perf/lib/libperf_int.h> #include <unistd.h> #if _OPENMP #include <omp.h> #endif /* _OPENMP */ #define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \ _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \ if (_status != UCS_OK) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \ "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[_op], (_size)); \ return _status; \ } #define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \ if (!ucs_test_all_flags(_attr, _required)) { \ if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \ #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[ucs_ffs64(~(_attr) & (_required))]); \ } \ return UCS_ERR_UNSUPPORTED; \ } typedef struct { union { struct { size_t dev_addr_len; size_t iface_addr_len; size_t ep_addr_len; } uct; struct { size_t worker_addr_len; size_t total_wireup_len; } ucp; }; size_t rkey_size; unsigned long recv_buffer; } ucx_perf_ep_info_t; const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST]; static const char *perf_iface_ops[] = { [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short", [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy", [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short", [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy", 
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short", [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy", [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep", [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability", [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback", [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback", [ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy", [ucs_ilog2(UCT_IFACE_FLAG_EP_CHECK)] = "ep check", [ucs_ilog2(UCT_IFACE_FLAG_EP_KEEPALIVE)] = "ep keepalive" }; static const char *perf_atomic_op[] = { [UCT_ATOMIC_OP_ADD] = "add", [UCT_ATOMIC_OP_AND] = "and", [UCT_ATOMIC_OP_OR] = "or" , [UCT_ATOMIC_OP_XOR] = "xor" }; static const char *perf_atomic_fop[] = { [UCT_ATOMIC_OP_ADD] = "fetch-add", [UCT_ATOMIC_OP_AND] = "fetch-and", [UCT_ATOMIC_OP_OR] = "fetch-or", [UCT_ATOMIC_OP_XOR] = "fetch-xor", [UCT_ATOMIC_OP_SWAP] = "swap", [UCT_ATOMIC_OP_CSWAP] = "cswap" }; /* * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. 
*/ static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n) { int low, high ; int median; int middle, ll, hh; #define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; } low = 0 ; high = n-1 ; median = (low + high) / 2; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) / 2; if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ; if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ ELEM_SWAP(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; ELEM_SWAP(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ ELEM_SWAP(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } static ucs_status_t uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, unsigned flags, uct_allocated_memory_t *alloc_mem) { ucs_status_t status; status = uct_iface_mem_alloc(perf->uct.iface, length, flags, "perftest", alloc_mem); if (status != UCS_OK) { ucs_free(alloc_mem); ucs_error("failed to allocate memory: %s", ucs_status_string(status)); return status; } ucs_assert(alloc_mem->md == perf->uct.md); return UCS_OK; } static void uct_perf_test_free_host(const ucx_perf_context_t *perf, uct_allocated_memory_t *alloc_mem) { uct_iface_mem_free(alloc_mem); } static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type, const void *src, ucs_memory_type_t src_mem_type, size_t count) { if 
((dst_mem_type != UCS_MEMORY_TYPE_HOST) || (src_mem_type != UCS_MEMORY_TYPE_HOST)) { ucs_error("wrong memory type passed src - %d, dst - %d", src_mem_type, dst_mem_type); } else { memcpy(dst, src, count); } } static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; unsigned flags; size_t buffer_size; if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* TODO use params->alignment */ flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ? UCT_MD_MEM_FLAG_NONBLOCK : 0; flags |= UCT_MD_MEM_ACCESS_ALL; /* Allocate send buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.send_mem); if (status != UCS_OK) { goto err; } perf->send_buffer = perf->uct.send_mem.address; /* Allocate receive buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.recv_mem); if (status != UCS_OK) { goto err_free_send; } perf->recv_buffer = perf->uct.recv_mem.address; /* Allocate IOV datatype memory */ perf->params.msg_size_cnt = params->msg_size_cnt; perf->uct.iov = malloc(sizeof(*perf->uct.iov) * perf->params.msg_size_cnt * params->thread_count); if (NULL == perf->uct.iov) { status = UCS_ERR_NO_MEMORY; ucs_error("Failed allocate send IOV(%lu) buffer: %s", perf->params.msg_size_cnt, ucs_status_string(status)); goto err_free_recv; } ucs_debug("allocated memory. 
Send buffer %p, Recv buffer %p", perf->send_buffer, perf->recv_buffer); return UCS_OK; err_free_recv: perf->allocator->uct_free(perf, &perf->uct.recv_mem); err_free_send: perf->allocator->uct_free(perf, &perf->uct.send_mem); err: return status; } static void uct_perf_test_free_mem(ucx_perf_context_t *perf) { perf->allocator->uct_free(perf, &perf->uct.send_mem); perf->allocator->uct_free(perf, &perf->uct.recv_mem); free(perf->uct.iov); } void ucx_perf_test_start_clock(ucx_perf_context_t *perf) { ucs_time_t start_time = ucs_get_time(); perf->start_time_acc = ucs_get_accurate_time(); perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX : ucs_time_from_sec(perf->params.max_time) + start_time; perf->prev_time = start_time; perf->prev.time = start_time; perf->prev.time_acc = perf->start_time_acc; perf->current.time_acc = perf->start_time_acc; } /* Initialize/reset all parameters that could be modified by the warm-up run */ static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned i; perf->max_iter = (perf->params.max_iter == 0) ? 
UINT64_MAX : perf->params.max_iter; perf->report_interval = ucs_time_from_sec(perf->params.report_interval); perf->current.time = 0; perf->current.msgs = 0; perf->current.bytes = 0; perf->current.iters = 0; perf->prev.msgs = 0; perf->prev.bytes = 0; perf->prev.iters = 0; perf->timing_queue_head = 0; for (i = 0; i < TIMING_QUEUE_SIZE; ++i) { perf->timing_queue[i] = 0; } ucx_perf_test_start_clock(perf); } static void ucx_perf_test_init(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned group_index; perf->params = *params; group_index = rte_call(perf, group_index); if (0 == group_index) { perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type]; } else { perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type]; } ucx_perf_test_prepare_new_run(perf, params); } void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result) { ucs_time_t median; double factor; if ((perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) || (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG_WAIT_MEM)) { factor = 2.0; } else { factor = 1.0; } result->iters = perf->current.iters; result->bytes = perf->current.bytes; result->elapsed_time = perf->current.time_acc - perf->start_time_acc; /* Latency */ median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE); result->latency.typical = ucs_time_to_sec(median) / factor; result->latency.moment_average = (perf->current.time_acc - perf->prev.time_acc) / (perf->current.iters - perf->prev.iters) / factor; result->latency.total_average = (perf->current.time_acc - perf->start_time_acc) / perf->current.iters / factor; /* Bandwidth */ result->bandwidth.typical = 0.0; // Undefined result->bandwidth.moment_average = (perf->current.bytes - perf->prev.bytes) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->bandwidth.total_average = perf->current.bytes / (perf->current.time_acc - perf->start_time_acc) * factor; /* Packet rate */ result->msgrate.typical = 0.0; // 
Undefined result->msgrate.moment_average = (perf->current.msgs - perf->prev.msgs) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->msgrate.total_average = perf->current.msgs / (perf->current.time_acc - perf->start_time_acc) * factor; } static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params) { size_t it; /* check if zero-size messages are requested and supported */ if ((/* they are not supported by: */ /* - UCT tests, except UCT AM Short/Bcopy */ (params->api == UCX_PERF_API_UCT) || (/* - UCP RMA and AMO tests */ (params->api == UCX_PERF_API_UCP) && (params->command != UCX_PERF_CMD_AM) && (params->command != UCX_PERF_CMD_TAG) && (params->command != UCX_PERF_CMD_TAG_SYNC) && (params->command != UCX_PERF_CMD_STREAM))) && ucx_perf_get_message_size(params) < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size too small, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } if ((params->api == UCX_PERF_API_UCP) && ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) || (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) && ((params->command == UCX_PERF_CMD_PUT) || (params->command == UCX_PERF_CMD_GET) || (params->command == UCX_PERF_CMD_ADD) || (params->command == UCX_PERF_CMD_FADD) || (params->command == UCX_PERF_CMD_SWAP) || (params->command == UCX_PERF_CMD_CSWAP))) { /* TODO: remove when support for non-HOST memory types will be added */ if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types", ucs_memory_type_names[params->send_mem_type], ucs_memory_type_names[params->recv_mem_type]); } return UCS_ERR_INVALID_PARAM; } if (params->max_outstanding < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("max_outstanding, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } /* check if particular message size fit into stride size */ if (params->iov_stride) { for (it = 0; it < params->msg_size_cnt; ++it) { if 
(params->msg_size_list[it] > params->iov_stride) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Buffer size %lu bigger than stride %lu", params->msg_size_list[it], params->iov_stride); } return UCS_ERR_INVALID_PARAM; } } } return UCS_OK; } void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index) { uct_ep_h ep = perf->uct.peers[peer_index].ep; uct_completion_t comp; ucs_status_t status; int started; started = 0; comp.func = NULL; comp.count = 2; do { if (!started) { status = uct_ep_flush(ep, 0, &comp); if (status == UCS_OK) { --comp.count; } else if (status == UCS_INPROGRESS) { started = 1; } else if (status != UCS_ERR_NO_RESOURCE) { ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status)); return; } } uct_worker_progress(perf->uct.worker); } while (comp.count > 1); } void uct_perf_iface_flush_b(ucx_perf_context_t *perf) { ucs_status_t status; do { status = uct_iface_flush(perf->uct.iface, 0, NULL); uct_worker_progress(perf->uct.worker); } while (status == UCS_INPROGRESS); if (status != UCS_OK) { ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status)); } } static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f, uint64_t bcopy_f, uint64_t zcopy_f) { return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) || (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_f : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f : 0; } static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32, uint64_t *op64, uint64_t op) { if (size == sizeof(uint32_t)) { *op32 = UCS_BIT(op); return UCS_OK; } else if (size == sizeof(uint64_t)) { *op64 = UCS_BIT(op); return UCS_OK; } return UCS_ERR_UNSUPPORTED; } static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m, size_t bcopy_m, uint64_t zcopy_m) { return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) || (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_m : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? 
bcopy_m : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m : 0; } static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params, ucs_memory_type_t mem_type, uct_md_attr_t *md_attr) { if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) && !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT, ucs_memory_type_names[mem_type], UCT_PERF_TEST_PARAMS_ARG(params)); return UCS_ERR_INVALID_PARAM; } } return UCS_OK; } static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params, uct_iface_h iface, uct_md_h md) { uint64_t required_flags = 0; uint64_t atomic_op32 = 0; uint64_t atomic_op64 = 0; uint64_t atomic_fop32 = 0; uint64_t atomic_fop64 = 0; uct_md_attr_t md_attr; uct_iface_attr_t attr; ucs_status_t status; size_t min_size, max_size, max_iov, message_size; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("uct_md_query(%s) failed: %s", params->uct.md_name, ucs_status_string(status)); return status; } status = uct_iface_query(iface, &attr); if (status != UCS_OK) { ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s", UCT_PERF_TEST_PARAMS_ARG(params), ucs_status_string(status)); return status; } min_size = 0; max_iov = 1; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_AM: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT, UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY); required_flags |= UCT_IFACE_FLAG_CB_SYNC; min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.am.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short, attr.cap.am.max_bcopy, attr.cap.am.max_zcopy); max_iov = attr.cap.am.max_iov; break; case UCX_PERF_CMD_PUT: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT, UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY); min_size = 
__get_max_size(params->uct.data_layout, 0, 0, attr.cap.put.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short, attr.cap.put.max_bcopy, attr.cap.put.max_zcopy); max_iov = attr.cap.put.max_iov; break; case UCX_PERF_CMD_GET: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT, UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.get.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short, attr.cap.get.max_bcopy, attr.cap.get.max_zcopy); max_iov = attr.cap.get.max_iov; break; case UCX_PERF_CMD_ADD: ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD, perf_atomic_op, params, status); max_size = 8; break; case UCX_PERF_CMD_FADD: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_SWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_CSWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP, perf_atomic_fop, params, status); max_size = 8; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } /* check atomics first */ ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op); ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op); ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop); ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop); /* check iface flags */ if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) && (!ucs_test_all_flags(attr.cap.flags, 
required_flags) || !required_flags)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s", UCT_PERF_TEST_PARAMS_ARG(params), perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]); } return UCS_ERR_UNSUPPORTED; } if (message_size < min_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is smaller than min supported (%zu)", message_size, min_size); } return UCS_ERR_UNSUPPORTED; } if (message_size > max_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is larger than max supported (%zu)", message_size, max_size); } return UCS_ERR_UNSUPPORTED; } if (params->command == UCX_PERF_CMD_AM) { if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) && (params->am_hdr_size != sizeof(uint64_t))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Short AM header size must be 8 bytes"); } return UCS_ERR_INVALID_PARAM; } if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) && (params->am_hdr_size > attr.cap.am.max_hdr)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than max supported (%zu)", params->am_hdr_size, attr.cap.am.max_hdr); } return UCS_ERR_UNSUPPORTED; } if (params->am_hdr_size > message_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than message size (%zu)", params->am_hdr_size, message_size); } return UCS_ERR_INVALID_PARAM; } if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM flow-control window (%d) too large (should be <= %d)", params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) && (params->flags & UCX_PERF_TEST_FLAG_VERBOSE)) { ucs_warn("Running active-message test with on-sided progress"); } } if ((UCT_PERF_DATA_LAYOUT_ZCOPY == 
params->uct.data_layout) || (UCT_PERF_DATA_LAYOUT_SHORT_IOV == params->uct.data_layout)) { if (params->msg_size_cnt > max_iov) { if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) || !params->msg_size_cnt) { ucs_error("Wrong number of IOV entries. Requested is %lu, " "should be in the range 1...%lu", params->msg_size_cnt, max_iov); } return UCS_ERR_UNSUPPORTED; } /* if msg_size_cnt == 1 the message size checked above */ if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && (UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) { if (params->am_hdr_size > params->msg_size_list[0]) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%lu) larger than the first IOV " "message size (%lu)", params->am_hdr_size, params->msg_size_list[0]); } return UCS_ERR_INVALID_PARAM; } } } status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr); if (status != UCS_OK) { return status; } status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = ADDR_BUF_SIZE; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; uct_ep_params_t ep_params; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", ucs_status_string(status)); goto err_free; } if 
(md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len); ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <= UCS_PTR_BYTE_OFFSET(buffer, buffer_size)); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { memset(rkey_buffer, 0, info.rkey_size); status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE; ep_params.iface = perf->uct.iface; if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if (status != UCS_OK) { ucs_error("Failed to 
uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR | UCT_EP_PARAM_FIELD_IFACE_ADDR; } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len); perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? 
iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.dev_addr = dev_addr; ep_params.iface_addr = iface_addr; status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); uct_perf_barrier(perf); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; } static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size, group_index, i; uct_perf_barrier(perf); uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0); group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); for (i = 0; i < group_size; ++i) { if (i != group_index) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep) { uct_ep_destroy(perf->uct.peers[i].ep); } } } free(perf->uct.peers); } static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params, ucp_params_t 
*ucp_params) { ucs_status_t status; size_t message_size; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_PUT: case UCX_PERF_CMD_GET: ucp_params->features |= UCP_FEATURE_RMA; break; case UCX_PERF_CMD_ADD: case UCX_PERF_CMD_FADD: case UCX_PERF_CMD_SWAP: case UCX_PERF_CMD_CSWAP: if (message_size == sizeof(uint32_t)) { ucp_params->features |= UCP_FEATURE_AMO32; } else if (message_size == sizeof(uint64_t)) { ucp_params->features |= UCP_FEATURE_AMO64; } else { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Atomic size should be either 32 or 64 bit"); } return UCS_ERR_INVALID_PARAM; } break; case UCX_PERF_CMD_TAG: case UCX_PERF_CMD_TAG_SYNC: ucp_params->features |= UCP_FEATURE_TAG; break; case UCX_PERF_CMD_STREAM: ucp_params->features |= UCP_FEATURE_STREAM; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_WAKEUP) || (params->wait_mode == UCX_PERF_WAIT_MODE_SLEEP)) { ucp_params->features |= UCP_FEATURE_WAKEUP; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype, size_t iovcnt, unsigned thread_count, ucp_dt_iov_t **iov_p) { ucp_dt_iov_t *iov; if (UCP_PERF_DATATYPE_IOV == datatype) { iov = malloc(sizeof(*iov) * iovcnt * thread_count); if (NULL == iov) { ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt); return UCS_ERR_NO_MEMORY; } *iov_p = iov; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, void **address_p, ucp_mem_h *memh, int non_blk_flag) { ucp_mem_map_params_t mem_map_params; ucp_mem_attr_t mem_attr; ucs_status_t status; mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH | UCP_MEM_MAP_PARAM_FIELD_FLAGS; mem_map_params.address = *address_p; 
mem_map_params.length = length; mem_map_params.flags = UCP_MEM_MAP_ALLOCATE; if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) { mem_map_params.flags |= non_blk_flag; } status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh); if (status != UCS_OK) { goto err; } mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS; status = ucp_mem_query(*memh, &mem_attr); if (status != UCS_OK) { goto err; } *address_p = mem_attr.address; return UCS_OK; err: return status; } static void ucp_perf_test_free_host(const ucx_perf_context_t *perf, void *address, ucp_mem_h memh) { ucs_status_t status; status = ucp_mem_unmap(perf->ucp.context, memh); if (status != UCS_OK) { ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status)); } } static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; size_t buffer_size; if (params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* Allocate send buffer memory */ perf->send_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->send_buffer, &perf->ucp.send_memh, UCP_MEM_MAP_NONBLOCK); if (status != UCS_OK) { goto err; } /* Allocate receive buffer memory */ perf->recv_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->recv_buffer, &perf->ucp.recv_memh, 0); if (status != UCS_OK) { goto err_free_send_buffer; } /* Allocate IOV datatype memory */ perf->ucp.send_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.send_iov); if (UCS_OK != status) { goto err_free_buffers; } perf->ucp.recv_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.recv_iov); if (UCS_OK != status) { goto err_free_send_iov_buffers; } return UCS_OK; 
err_free_send_iov_buffers: free(perf->ucp.send_iov); err_free_buffers: perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); err_free_send_buffer: perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); err: return UCS_ERR_NO_MEMORY; } static void ucp_perf_test_free_mem(ucx_perf_context_t *perf) { free(perf->ucp.recv_iov); free(perf->ucp.send_iov); perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); } static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf) { unsigned i, thread_count = perf->params.thread_count; ucs_status_ptr_t *req; ucs_status_t status; for (i = 0; i < thread_count; ++i) { if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) { ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey); } if (perf->ucp.tctx[i].perf.ucp.ep != NULL) { req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep, UCP_EP_CLOSE_MODE_FLUSH); if (UCS_PTR_IS_PTR(req)) { do { ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker); status = ucp_request_check_status(req); } while (status == UCS_INPROGRESS); ucp_request_release(req); } else if (UCS_PTR_STATUS(req) != UCS_OK) { ucs_warn("failed to close ep %p on thread %d: %s\n", perf->ucp.tctx[i].perf.ucp.ep, i, ucs_status_string(UCS_PTR_STATUS(req))); } } } } static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf, ucs_status_t status) { unsigned group_size = rte_call(perf, group_size); ucs_status_t collective_status = status; struct iovec vec; void *req = NULL; unsigned i; vec.iov_base = &status; vec.iov_len = sizeof(status); rte_call(perf, post_vec, &vec, 1, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { rte_call(perf, recv, i, &status, sizeof(status), req); if (status != UCS_OK) { collective_status = status; } } return collective_status; } static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf) { unsigned thread_count = 
perf->params.thread_count; void *rkey_buffer = NULL; void *req = NULL; unsigned group_size, group_index, i; ucx_perf_ep_info_t *remote_info; ucp_ep_params_t ep_params; ucp_address_t *address; ucs_status_t status; size_t buffer_size; void *buffer; group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); if (group_size != 2) { ucs_error("perftest requires group size to be exactly 2 " "(actual group size: %u)", group_size); return UCS_ERR_UNSUPPORTED; } buffer_size = ADDR_BUF_SIZE * thread_count; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("failed to allocate RTE receive buffer"); status = UCS_ERR_NO_MEMORY; goto err; } /* Initialize all endpoints and rkeys to NULL to handle error flow */ for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].perf.ucp.ep = NULL; perf->ucp.tctx[i].perf.ucp.rkey = NULL; } /* receive the data from the remote peer, extract the address from it * (along with additional wireup info) and create an endpoint to the peer */ rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req); remote_info = buffer; for (i = 0; i < thread_count; i++) { address = (ucp_address_t*)(remote_info + 1); rkey_buffer = UCS_PTR_BYTE_OFFSET(address, remote_info->ucp.worker_addr_len); perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer; ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ep_params.address = address; status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params, &perf->ucp.tctx[i].perf.ucp.ep); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } if (remote_info->rkey_size > 0) { status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer, &perf->ucp.tctx[i].perf.ucp.rkey); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status)); } goto 
err_free_eps_buffer; } } else { perf->ucp.tctx[i].perf.ucp.rkey = NULL; } remote_info = UCS_PTR_BYTE_OFFSET(remote_info, remote_info->ucp.total_wireup_len); } free(buffer); return UCS_OK; err_free_eps_buffer: ucp_perf_test_destroy_eps(perf); free(buffer); err: return status; } static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf, uint64_t features) { unsigned i, j, thread_count = perf->params.thread_count; size_t address_length = 0; void *rkey_buffer = NULL; void *req = NULL; ucx_perf_ep_info_t *info; ucp_address_t *address; ucs_status_t status; struct iovec *vec; size_t rkey_size; if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh, &rkey_buffer, &rkey_size); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status)); } goto err; } } else { rkey_size = 0; } /* each thread has an iovec with 3 entries to send to the remote peer: * ep_info, worker_address and rkey buffer */ vec = calloc(3 * thread_count, sizeof(struct iovec)); if (vec == NULL) { ucs_error("failed to allocate iovec"); status = UCS_ERR_NO_MEMORY; goto err_rkey_release; } /* get the worker address created for every thread and send it to the remote * peer */ for (i = 0; i < thread_count; i++) { status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker, &address, &address_length); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status)); } goto err_free_workers_vec; } vec[i * 3].iov_base = malloc(sizeof(*info)); if (vec[i * 3].iov_base == NULL) { ucs_error("failed to allocate vec entry for info"); status = UCS_ERR_NO_MEMORY; ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); goto err_free_workers_vec; } info = vec[i * 3].iov_base; info->ucp.worker_addr_len = address_length; info->ucp.total_wireup_len = 
sizeof(*info) + address_length + rkey_size; info->rkey_size = rkey_size; info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer; vec[(i * 3) + 0].iov_len = sizeof(*info); vec[(i * 3) + 1].iov_base = address; vec[(i * 3) + 1].iov_len = address_length; vec[(i * 3) + 2].iov_base = rkey_buffer; vec[(i * 3) + 2].iov_len = info->rkey_size; address_length = 0; } /* send to the remote peer */ rte_call(perf, post_vec, vec, 3 * thread_count, &req); rte_call(perf, exchange_vec, req); if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } for (i = 0; i < thread_count; i++) { free(vec[i * 3].iov_base); ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker, vec[(i * 3) + 1].iov_base); } free(vec); return UCS_OK; err_free_workers_vec: for (j = 0; j < i; j++) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } free(vec); err_rkey_release: if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } err: return status; } static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf, uint64_t features) { ucs_status_t status; unsigned i; /* pack the local endpoints data and send to the remote peer */ status = ucp_perf_test_send_local_data(perf, features); if (status != UCS_OK) { goto err; } /* receive remote peer's endpoints' data and connect to them */ status = ucp_perf_test_receive_remote_data(perf); if (status != UCS_OK) { goto err; } /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, UCS_OK); if (status != UCS_OK) { goto err_destroy_eps; } /* force wireup completion */ for (i = 0; i < perf->params.thread_count; i++) { status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { ucs_warn("ucp_worker_flush() failed on theread %d: %s", i, ucs_status_string(status)); } } return status; err_destroy_eps: ucp_perf_test_destroy_eps(perf); err: (void)ucp_perf_test_exchange_status(perf, 
status); return status; } static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { ucp_perf_barrier(perf); ucp_perf_test_destroy_eps(perf); } static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf) { unsigned i; for (i = 0; i < perf->params.thread_count; i++) { if (perf->ucp.tctx[i].perf.ucp.worker != NULL) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } } } static void ucx_perf_set_warmup(ucx_perf_context_t* perf, const ucx_perf_params_t* params) { perf->max_iter = ucs_min(params->warmup_iter, ucs_div_round_up(params->max_iter, 10)); perf->report_interval = ULONG_MAX; } static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf) { uct_component_h *uct_components; uct_component_attr_t component_attr; uct_tl_resource_desc_t *tl_resources; unsigned md_index, num_components; unsigned tl_index, num_tl_resources; unsigned cmpt_index; ucs_status_t status; uct_md_h md; uct_md_config_t *md_config; status = uct_query_components(&uct_components, &num_components); if (status != UCS_OK) { goto out; } for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) { component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT; status = uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; } component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES; component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) * component_attr.md_resource_count); status = uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; } for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) { status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL, &md_config); if (status != UCS_OK) { goto out_release_components_list; } ucs_strncpy_zero(perf->params.uct.md_name, component_attr.md_resources[md_index].md_name, UCT_MD_NAME_MAX); status = 
uct_md_open(uct_components[cmpt_index], component_attr.md_resources[md_index].md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { goto out_release_components_list; } status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources); if (status != UCS_OK) { uct_md_close(md); goto out_release_components_list; } for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) { if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) && !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name)) { uct_release_tl_resource_list(tl_resources); perf->uct.cmpt = uct_components[cmpt_index]; perf->uct.md = md; status = UCS_OK; goto out_release_components_list; } } uct_md_close(md); uct_release_tl_resource_list(tl_resources); } } ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT, UCT_PERF_TEST_PARAMS_ARG(&perf->params)); status = UCS_ERR_NO_DEVICE; out_release_components_list: uct_release_component_list(uct_components); out: return status; } void uct_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))uct_worker_progress, (void*)perf->uct.worker); } void ucp_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress, #if _OPENMP (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker); #else (void*)perf->ucp.tctx[0].perf.ucp.worker); #endif } static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; uct_iface_config_t *iface_config; ucs_status_t status; uct_iface_params_t iface_params = { .field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE | UCT_IFACE_PARAM_FIELD_STATS_ROOT | UCT_IFACE_PARAM_FIELD_RX_HEADROOM | UCT_IFACE_PARAM_FIELD_CPU_MASK, .open_mode = UCT_IFACE_OPEN_MODE_DEVICE, .mode.device.tl_name = params->uct.tl_name, .mode.device.dev_name = params->uct.dev_name, .stats_root = ucs_stats_get_root(), .rx_headroom = 0 }; UCS_CPU_ZERO(&iface_params.cpu_mask); status = 
ucs_async_context_init(&perf->uct.async, params->async_mode); if (status != UCS_OK) { goto out; } status = uct_worker_create(&perf->uct.async, params->thread_mode, &perf->uct.worker); if (status != UCS_OK) { goto out_cleanup_async; } status = uct_perf_create_md(perf); if (status != UCS_OK) { goto out_destroy_worker; } status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL, NULL, &iface_config); if (status != UCS_OK) { goto out_destroy_md; } status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params, iface_config, &perf->uct.iface); uct_config_release(iface_config); if (status != UCS_OK) { ucs_error("Failed to open iface: %s", ucs_status_string(status)); goto out_destroy_md; } status = uct_perf_test_check_capabilities(params, perf->uct.iface, perf->uct.md); /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, status); if (status != UCS_OK) { goto out_iface_close; } status = uct_perf_test_alloc_mem(perf); if (status != UCS_OK) { goto out_iface_close; } /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called * to give a chance to finish connection for some tranports (ib/ud, tcp). 
* They may return UCS_INPROGRESS from `uct_iface_flush` when connections are * in progress */ uct_iface_progress_enable(perf->uct.iface, UCT_PROGRESS_SEND | UCT_PROGRESS_RECV); status = uct_perf_test_setup_endpoints(perf); if (status != UCS_OK) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); goto out_free_mem; } return UCS_OK; out_free_mem: uct_perf_test_free_mem(perf); out_iface_close: uct_iface_close(perf->uct.iface); out_destroy_md: uct_md_close(perf->uct.md); out_destroy_worker: uct_worker_destroy(perf->uct.worker); out_cleanup_async: ucs_async_context_cleanup(&perf->uct.async); out: return status; } static void uct_perf_cleanup(ucx_perf_context_t *perf) { uct_perf_test_cleanup_endpoints(perf); uct_perf_test_free_mem(perf); uct_iface_close(perf->uct.iface); uct_md_close(perf->uct.md); uct_worker_destroy(perf->uct.worker); ucs_async_context_cleanup(&perf->uct.async); } static void ucp_perf_request_init(void *req) { ucp_perf_request_t *request = req; request->context = NULL; } static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf) { ucp_params_t ucp_params; ucp_worker_params_t worker_params; ucp_config_t *config; ucs_status_t status; unsigned i, thread_count; size_t message_size; ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_REQUEST_SIZE | UCP_PARAM_FIELD_REQUEST_INIT; ucp_params.features = 0; ucp_params.request_size = sizeof(ucp_perf_request_t); ucp_params.request_init = ucp_perf_request_init; if (perf->params.thread_count > 1) { /* when there is more than one thread, a ucp_worker would be created for * each. 
all of them will share the same ucp_context */ ucp_params.features |= UCP_PARAM_FIELD_MT_WORKERS_SHARED; ucp_params.mt_workers_shared = 1; } status = ucp_perf_test_fill_params(&perf->params, &ucp_params); if (status != UCS_OK) { goto err; } status = ucp_config_read(NULL, NULL, &config); if (status != UCS_OK) { goto err; } status = ucp_init(&ucp_params, config, &perf->ucp.context); ucp_config_release(config); if (status != UCS_OK) { goto err; } thread_count = perf->params.thread_count; message_size = ucx_perf_get_message_size(&perf->params); status = ucp_perf_test_alloc_mem(perf); if (status != UCS_OK) { ucs_warn("ucp test failed to allocate memory"); goto err_cleanup; } perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t)); if (perf->ucp.tctx == NULL) { ucs_warn("ucp test failed to allocate memory for thread contexts"); goto err_free_mem; } worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; worker_params.thread_mode = perf->params.thread_mode; for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].tid = i; perf->ucp.tctx[i].perf = *perf; /* Doctor the src and dst buffers to make them thread specific */ perf->ucp.tctx[i].perf.send_buffer = UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size); perf->ucp.tctx[i].perf.recv_buffer = UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size); status = ucp_worker_create(perf->ucp.context, &worker_params, &perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { goto err_free_tctx_destroy_workers; } } status = ucp_perf_test_setup_endpoints(perf, ucp_params.features); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); } goto err_free_tctx_destroy_workers; } return UCS_OK; err_free_tctx_destroy_workers: ucp_perf_test_destroy_workers(perf); free(perf->ucp.tctx); err_free_mem: ucp_perf_test_free_mem(perf); err_cleanup: ucp_cleanup(perf->ucp.context); err: return status; } static void 
ucp_perf_cleanup(ucx_perf_context_t *perf) { ucp_perf_test_cleanup_endpoints(perf); ucp_perf_barrier(perf); ucp_perf_test_free_mem(perf); ucp_perf_test_destroy_workers(perf); free(perf->ucp.tctx); ucp_cleanup(perf->ucp.context); } static struct { ucs_status_t (*setup)(ucx_perf_context_t *perf); void (*cleanup)(ucx_perf_context_t *perf); ucs_status_t (*run)(ucx_perf_context_t *perf); void (*barrier)(ucx_perf_context_t *perf); } ucx_perf_funcs[] = { [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch, uct_perf_barrier}, [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch, ucp_perf_barrier} }; static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result); ucs_status_t ucx_perf_run(const ucx_perf_params_t *params, ucx_perf_result_t *result) { ucx_perf_context_t *perf; ucs_status_t status; ucx_perf_global_init(); if (params->command == UCX_PERF_CMD_LAST) { ucs_error("Test is not selected"); status = UCS_ERR_INVALID_PARAM; goto out; } if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) { ucs_error("Invalid test API parameter (should be UCT or UCP)"); status = UCS_ERR_INVALID_PARAM; goto out; } perf = malloc(sizeof(*perf)); if (perf == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } ucx_perf_test_init(perf, params); if (perf->allocator == NULL) { ucs_error("Unsupported memory types %s<->%s", ucs_memory_type_names[params->send_mem_type], ucs_memory_type_names[params->recv_mem_type]); status = UCS_ERR_UNSUPPORTED; goto out_free; } if ((params->api == UCX_PERF_API_UCT) && (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) { ucs_warn("UCT tests also copy 2-byte values from %s memory to " "%s memory, which may impact performance results", ucs_memory_type_names[perf->allocator->mem_type], ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]); } status = perf->allocator->init(perf); if (status != UCS_OK) { goto out_free; } status = ucx_perf_funcs[params->api].setup(perf); if 
(status != UCS_OK) { goto out_free; } if (params->thread_count == 1) { if (params->api == UCX_PERF_API_UCP) { perf->ucp.worker = perf->ucp.tctx[0].perf.ucp.worker; perf->ucp.ep = perf->ucp.tctx[0].perf.ucp.ep; perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr; perf->ucp.rkey = perf->ucp.tctx[0].perf.ucp.rkey; } if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); status = ucx_perf_funcs[params->api].run(perf); if (status != UCS_OK) { goto out_cleanup; } ucx_perf_funcs[params->api].barrier(perf); ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (status == UCS_OK) { ucx_perf_calc_result(perf, result); rte_call(perf, report, result, perf->params.report_arg, 1, 0); } } else { status = ucx_perf_thread_spawn(perf, result); } out_cleanup: ucx_perf_funcs[params->api].cleanup(perf); out_free: free(perf); out: return status; } #if _OPENMP static ucs_status_t ucx_perf_thread_run_test(void* arg) { ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */ ucx_perf_result_t* result = &tctx->result; ucx_perf_context_t* perf = &tctx->perf; ucx_perf_params_t* params = &perf->params; ucs_status_t status; /* new threads need explicit device association */ status = perf->allocator->init(perf); if (status != UCS_OK) { goto out; } if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (UCS_OK != status) { goto out; } ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ #pragma omp barrier status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (UCS_OK != status) { goto out; } ucx_perf_calc_result(perf, result); out: return status; } static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* 
all the thread contexts on perf */ unsigned i, thread_count = perf->params.thread_count; double lat_sum_total_avegare = 0.0; ucx_perf_result_t agg_result; agg_result.iters = tctx[0].result.iters; agg_result.bytes = tctx[0].result.bytes; agg_result.elapsed_time = tctx[0].result.elapsed_time; agg_result.bandwidth.total_average = 0.0; agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */ agg_result.latency.total_average = 0.0; agg_result.msgrate.total_average = 0.0; agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */ /* when running with multiple threads, the moment average value is * undefined since we don't capture the values of the last iteration */ agg_result.msgrate.moment_average = 0.0; agg_result.bandwidth.moment_average = 0.0; agg_result.latency.moment_average = 0.0; agg_result.latency.typical = 0.0; /* in case of multiple threads, we have to aggregate the results so that the * final output of the result would show the performance numbers that were * collected from all the threads. * BW and message rate values will be the sum of their values from all * the threads, while the latency value is the average latency from the * threads. 
*/ for (i = 0; i < thread_count; i++) { agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average; agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average; lat_sum_total_avegare += tctx[i].result.latency.total_average; } agg_result.latency.total_average = lat_sum_total_avegare / thread_count; rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1); } static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */ int ti, thread_count = perf->params.thread_count; ucs_status_t* statuses; ucs_status_t status; omp_set_num_threads(thread_count); statuses = calloc(thread_count, sizeof(ucs_status_t)); if (statuses == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } #pragma omp parallel private(ti) { ti = omp_get_thread_num(); tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]); } status = UCS_OK; for (ti = 0; ti < thread_count; ti++) { if (UCS_OK != tctx[ti].status) { ucs_error("Thread %d failed to run test: %s", tctx[ti].tid, ucs_status_string(tctx[ti].status)); status = tctx[ti].status; } } ucx_perf_thread_report_aggregated_results(perf); free(statuses); out: return status; } #else static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)"); return UCS_ERR_INVALID_PARAM; } #endif /* _OPENMP */ void ucx_perf_global_init() { static ucx_perf_allocator_t host_allocator = { .mem_type = UCS_MEMORY_TYPE_HOST, .init = ucs_empty_function_return_success, .ucp_alloc = ucp_perf_test_alloc_host, .ucp_free = ucp_perf_test_free_host, .uct_alloc = uct_perf_test_alloc_host, .uct_free = uct_perf_test_free_host, .memcpy = ucx_perf_test_memcpy_host, .memset = memset }; UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest); ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator; /* FIXME 
Memtype allocator modules must be loaded to global scope, otherwise * alloc hooks, which are using dlsym() to get pointer to original function, * do not work. Need to use bistro for memtype hooks to fix it. */ UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL); }
1740.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ for (i = 0; i < _PB_NI; i++) { #pragma omp for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ for (i = 0; i < _PB_NJ; i++) { #pragma omp for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ for (i = 0; i < _PB_NI; i++) { #pragma omp for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. 
All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
basic_parallel_for.c
#include <stdio.h> #include <omp.h> int main() { #pragma omp parallel for for(int i = 0; i < 10; i++) { printf("%d\n", i); } }
GB_unop__identity_int16_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_int16_fc64
// op(A') function: GB_unop_tran__identity_int16_fc64

// C type: int16_t
// A type: GxB_FC64_t
// cast: int16_t cij = GB_cast_to_int16_t (creal (aij))
// unaryop: cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: take the real part of the complex entry, then saturate to int16
#define GB_CAST(z, aij) \
    int16_t z = GB_cast_to_int16_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = GB_cast_to_int16_t (creal (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over a dense or bitmap vector of anz entries, split
// statically across nthreads OpenMP threads.
GrB_Info GB_unop_apply__identity_int16_fc64
(
    int16_t *Cx,                    // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a flat memcpy suffices
        // (compiled out here since this kernel does typecast)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t (creal (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c and expands the
// GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__identity_int16_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lockarray.c
/* test fine grained locks instead of critical section by Chunhua Liao */ #include <stdio.h> #ifdef _OPENMP #include <omp.h> #define LOCKNUM 100 #endif #define SIZE 5000 int main(void) { int a[SIZE]; int i,j,sum,lock_index; #ifdef _OPENMP omp_lock_t lck[LOCKNUM]; for (i=0;i<LOCKNUM;i++) omp_init_lock(&(lck[i])); #endif for (i=0;i<SIZE;i++) a[i]=0; #pragma omp parallel private (i,j,lock_index) { /*critical version*/ #pragma omp for schedule(dynamic,1) for (i=0;i<SIZE;i++) { j=(i*i)%SIZE; #pragma omp critical { a[j]=a[j]+5; } } /* fine grained lock version*/ #pragma omp for schedule(dynamic,1) for (i=0;i<SIZE;i++) { j=(i*i)%SIZE; #ifdef _OPENMP lock_index= j%LOCKNUM; // omp_set_lock(lck[lock_index]); #endif a[j]=a[j]-5; #ifdef _OPENMP // omp_unset_lock(lck[lock_index]); #endif } /*verify the result*/ sum=0; #pragma omp for reduction (+:sum) for (i=0;i<SIZE;i++) { sum+=a[i]; } } /* destroy locks*/ #ifdef _OPENMP for (i=0;i<LOCKNUM;i++) omp_destroy_lock(&(lck[i])); #endif printf("sum of a[] = %d\n",sum); }
test.h
#ifndef test_h
#define test_h

#include <cassert>

#include "build_list.h"
#include "exafmm_t.h"
#include "fmm_base.h"

namespace exafmm_t {
  //! A derived FMM class, assuming that all sources have a unit charge. Kernel functions only compute monopoles. This is used for testing tree and list construction.
  template <typename T>
  class DummyFmm : public FmmBase<T> {
  public:
    DummyFmm() {}

    // p and nsurf are forced to 1 so every node carries exactly one
    // (monopole) equivalent value.
    DummyFmm(int ncrit_) { this->p = 1; this->nsurf = 1; this->ncrit = ncrit_;}

    /**
     * @brief Given the octant of a node, return a triplet's relative postion to its parent
     *
     * @param octant Octant of a node, integer from 0 to 7
     *
     * @return Triplet of node's relative position to its parent, each element is -1 or 1
     */
    ivec3 octant2coord(int octant) {
      ivec3 rel_coord;
      // bit d of the octant selects the sign of coordinate d
      rel_coord[0] = octant & 1 ? 1 : -1;
      rel_coord[1] = octant & 2 ? 1 : -1;
      rel_coord[2] = octant & 4 ? 1 : -1;
      return rel_coord;
    }

    //! Dummy P2M operator: a leaf's monopole is its source count.
    void P2M(NodePtrs<T>& leafs) {
#pragma omp parallel for
      for(size_t i=0; i<leafs.size(); ++i) {
        Node<T>* leaf = leafs[i];
        leaf->up_equiv[0] += leaf->nsrcs;
      }
    }

    //! Dummy M2M operator: post-order task recursion, then fold each
    //! child's monopole into the parent after the taskwait barrier.
    void M2M(Node<T>* node) {
      if(node->is_leaf) return;
      for(int octant=0; octant<8; ++octant) {
        if(node->children[octant])
#pragma omp task untied
          M2M(node->children[octant]);
      }
#pragma omp taskwait
      for(int octant=0; octant<8; octant++) {
        if(node->children[octant]) {
          Node<T>* child = node->children[octant];
          node->up_equiv[0] += child->up_equiv[0];
        }
      }
    }

    //! Dummy M2L operator: for each pair of non-adjacent children across an
    //! M2L pair of parents, add the source child's monopole to the target.
    void M2L(NodePtrs<T>& nonleafs) {
#pragma omp parallel for schedule(dynamic)
      for (size_t i=0; i<nonleafs.size(); ++i) {
        Node<T>* trg_parent = nonleafs[i];
        NodePtrs<T>& M2L_list = trg_parent->M2L_list;
        for (size_t j=0; j<M2L_list.size(); ++j) {
          Node<T>* src_parent = M2L_list[j];
          if (src_parent) {
            // NOTE(review): assumes every child slot of both parents is
            // non-null here — confirm against the tree/list builder.
            for (int src_octant=0; src_octant<8; ++src_octant) {
              Node<T>* src_child = src_parent->children[src_octant];
              for (int trg_octant=0; trg_octant<8; ++trg_octant) {
                Node<T>* trg_child = trg_parent->children[trg_octant];
                if (!is_adjacent(src_child->key, trg_child->key)) {
                  trg_child->dn_equiv[0] += src_child->up_equiv[0];
                }
              }
            }
          }
        }
      }
    }

    //! Dummy P2L operator: add each P2L source node's source count to the
    //! target node's local expansion.
    void P2L(Nodes<T>& nodes) {
#pragma omp parallel for schedule(dynamic)
      for(size_t i=0; i<nodes.size(); ++i) {
        NodePtrs<T>& P2L_list = nodes[i].P2L_list;
        for(size_t j=0; j<P2L_list.size(); ++j) {
          nodes[i].dn_equiv[0] += P2L_list[j]->nsrcs;
        }
      }
    }

    //! Dummy M2P operator: add each M2P source's monopole directly to the
    //! leaf's target value (leaves without targets are skipped).
    void M2P(NodePtrs<T>& leafs) {
#pragma omp parallel for schedule(dynamic)
      for(size_t i=0; i<leafs.size(); ++i) {
        if (leafs[i]->ntrgs == 0) continue;
        NodePtrs<T>& M2P_list = leafs[i]->M2P_list;
        for (size_t j=0; j<M2P_list.size(); ++j) {
          leafs[i]->trg_value[0] += M2P_list[j]->up_equiv[0];
        }
      }
    }

    //! Dummy L2L operator: push the parent's local value to children first,
    //! then recurse (pre-order), so each child sees the updated parent.
    void L2L(Node<T>* node) {
      if(node->is_leaf) return;
      for(int octant=0; octant<8; octant++) {
        if(node->children[octant]) {
          Node<T>* child = node->children[octant];
          child->dn_equiv[0] += node->dn_equiv[0];
        }
      }
      for(int octant=0; octant<8; ++octant) {
        if(node->children[octant])
#pragma omp task untied
          L2L(node->children[octant]);
      }
#pragma omp taskwait
    }

    //! Dummy L2P operator: fold a leaf's local expansion into its target value.
    void L2P(NodePtrs<T>& leafs) {
#pragma omp parallel for
      for(size_t i=0; i<leafs.size(); ++i) {
        Node<T>* leaf = leafs[i];
        if (leaf->ntrgs==0) continue;
        leaf->trg_value[0] += leaf->dn_equiv[0];
      }
    }

    //! Dummy P2P operator: add each near-field source node's source count to
    //! the leaf's target value.
    void P2P(NodePtrs<T>& leafs) {
#pragma omp parallel for schedule(dynamic)
      for(size_t i=0; i<leafs.size(); ++i) {
        Node<T>* leaf = leafs[i];
        if (leaf->ntrgs==0) continue;
        NodePtrs<T>& P2P_list = leaf->P2P_list;
        for(size_t j=0; j<P2P_list.size(); ++j) {
          leaf->trg_value[0] += P2P_list[j]->nsrcs;
        }
      }
    }

    // below are the virtual methods define in FmmBase class
    // (intentionally empty: the dummy FMM has no direct kernels)
    void potential_P2P(RealVec& src_coord, std::vector<T>& src_value, RealVec& trg_coord, std::vector<T>& trg_value) {}
    void gradient_P2P(RealVec& src_coord, std::vector<T>& src_value, RealVec& trg_coord, std::vector<T>& trg_value) {}
    void M2L(Nodes<T>& nodes) {}
  };

  /**
   * @brief A helper function to build the tree needed in kernel test.
   *
   * @tparam T Real or complex type.
   * @param parent Pointer to parent node.
   * @param first_child Pointer to first child node.
   */
  template <typename T>
  void set_children(Node<T>* parent, Node<T>* first_child) {
    parent->is_leaf = false;
    // children are assumed to be laid out contiguously starting at first_child
    for (int octant=0; octant<8; ++octant) {
      Node<T>* child = first_child + octant;
      child->octant = octant;
      child->parent = parent;
      child->level = parent->level + 1;
      child->x = parent->x;
      child->r = parent->r / 2;
      // offset each coordinate by +/- r according to the octant's bits
      for (int d=0; d<3; d++) {
        child->x[d] += child->r * (((octant & 1 << d) >> d) * 2 - 1);
      }
      parent->children.push_back(child);
    }
  }
}// end namespace
#endif
FGP_dTV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2019 Daniil Kazantsev
 * Copyright 2019 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "FGP_dTV_core.h"

/* C-OMP implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case)
 * which employs structural similarity of the level sets of two images/volumes, see [1,2]
 * The current implementation updates image 1 while image 2 is being fixed.
 *
 * Input Parameters:
 * 1. Noisy image/volume [REQUIRED]
 * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED]
 * 3. lambdaPar - regularization parameter [REQUIRED]
 * 4. Number of iterations [OPTIONAL]
 * 5. epsilon: tolerance constant [OPTIONAL]
 * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] *
 * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL]
 * 8. nonneg: nonnegativity (0 is OFF by default) [OPTIONAL]
 * 9. print information: 0 (off) or 1 (on) [OPTIONAL]
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * This function is based on the Matlab's codes and papers by
 * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
 * [2] M. J. Ehrhardt and M. M.
Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084–1106 */

/**
 * Main FGP-dTV driver (2D when dimZ <= 1, otherwise 3D).
 *
 * Runs iterationsNumb FISTA iterations of directional-TV denoising of Input,
 * guided by the (eta-smoothed) gradient field of InputRef. Output holds the
 * regularized result; infovector[0] receives the iteration counter and
 * infovector[1] the last computed relative change.
 *
 * Every 5th iteration the relative change ||x_k - x_{k-5}|| / ||x_k|| is
 * checked against epsil (when epsil != 0) and the loop stops after the
 * tolerance has been met more than 3 times.
 *
 * Fix vs. original: float data was passed through double-precision sqrt();
 * sqrtf() is now used consistently (the convergence check already used sqrtf).
 *
 * Returns 0 (result is delivered through Output/infovector).
 */
float dTV_FGP_CPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iterationsNumb, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
    int ll;
    long j, DimTotal;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    float tk = 1.0f;       /* FISTA momentum parameters */
    float tkp1 = 1.0f;
    int count = 0;         /* number of times the tolerance has been reached */

    float *Output_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL;
    DimTotal = (long)(dimX*dimY*dimZ);

    /* Output_prev only needed for the early-stopping check */
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
    P1 = calloc(DimTotal, sizeof(float));
    P2 = calloc(DimTotal, sizeof(float));
    P1_prev = calloc(DimTotal, sizeof(float));
    P2_prev = calloc(DimTotal, sizeof(float));
    R1 = calloc(DimTotal, sizeof(float));
    R2 = calloc(DimTotal, sizeof(float));
    InputRef_x = calloc(DimTotal, sizeof(float));
    InputRef_y = calloc(DimTotal, sizeof(float));

    if (dimZ <= 1) {
        /*2D case */
        /* calculate gradient field (smoothed) for the reference image */
        GradNorm_func2D(InputRef, InputRef_x, InputRef_y, eta, (long)(dimX), (long)(dimY));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);

            /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/
            ProjectVect_func2D(R1, R2, InputRef_x, InputRef_y, (long)(dimX), (long)(dimY));

            /* computing the gradient of the objective function */
            Obj_dfunc2D(Input, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /*Taking a step towards minus of the gradient*/
            Grad_dfunc2D(P1, P2, Output, R1, R2, InputRef_x, InputRef_y, lambdaPar, (long)(dimX), (long)(dimY));

            /* projection step */
            Proj_func2D(P1, P2, methodTV, DimTotal);

            /*updating R and t*/
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_dfunc2D(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, DimTotal);

            /*storing old values*/
            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), 1l);
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), 1l);
            tk = tkp1;

            /* check early stopping criteria */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                /* NOTE(review): re1 == 0 (all-zero Output) would give NaN here;
                   behavior kept as in the rest of the toolkit */
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
    }
    else {
        /*3D case*/
        float *P3=NULL, *P3_prev=NULL, *R3=NULL, *InputRef_z=NULL;
        P3 = calloc(DimTotal, sizeof(float));
        P3_prev = calloc(DimTotal, sizeof(float));
        R3 = calloc(DimTotal, sizeof(float));
        InputRef_z = calloc(DimTotal, sizeof(float));

        /* calculate gradient field (smoothed) for the reference volume */
        GradNorm_func3D(InputRef, InputRef_x, InputRef_y, InputRef_z, eta, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

            /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/
            ProjectVect_func3D(R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* computing the gradient of the objective function */
            Obj_dfunc3D(Input, Output, R1, R2, R3, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /*Taking a step towards minus of the gradient*/
            Grad_dfunc3D(P1, P2, P3, Output, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* projection step */
            Proj_func3D(P1, P2, P3, methodTV, DimTotal);

            /*updating R and t*/
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_dfunc3D(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, DimTotal);

            /*storing old values*/
            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P3, P3_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            tk = tkp1;

            /* check early stopping criteria */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
        free(P3); free(P3_prev); free(R3); free(InputRef_z);
    }
    if (epsil != 0.0f) free(Output_prev);
    free(P1); free(P2); free(P1_prev); free(P2_prev); free(R1); free(R2); free(InputRef_x); free(InputRef_y);

    /*adding info into info_vector */
    infovector[0] = (float)(ll);  /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;           /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/* Compute the eta-smoothed, normalized forward-difference gradient field
 * (B_x, B_y) of the reference image B (zero Neumann-style boundary at the
 * far edges). Each component is divided by sqrt(|grad B|^2 + eta^2). */
float GradNorm_func2D(float *B, float *B_x, float *B_y, float eta, long dimX, long dimY)
{
    long i,j,index;
    float val1, val2, gradX, gradY, magn;
    float eta_sq = eta*eta;
#pragma omp parallel for shared(B, B_x, B_y) private(i,j,index,val1,val2,gradX,gradY,magn)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* zero boundary conditions */
            if (i == dimX-1) {val1 = 0.0f;} else {val1 = B[j*dimX + (i+1)];}
            if (j == dimY-1) {val2 = 0.0f;} else {val2 = B[(j+1)*dimX + i];}
            gradX = val1 - B[index];
            gradY = val2 - B[index];
            magn = gradX*gradX + gradY*gradY;
            magn = sqrtf(magn + eta_sq); /* the eta-smoothed gradients magnitude (single precision) */
            B_x[index] = gradX/magn;
            B_y[index] = gradY/magn;
        }}
    return 1;
}

/* Project the 2D vector field (R1, R2) onto the orthogonal complement of
 * the (unit-normalized) reference field (B_x, B_y), in place. */
float ProjectVect_func2D(float *R1, float *R2, float *B_x, float *B_y, long dimX, long dimY)
{
    long i,j,index;
    float in_prod;
#pragma omp parallel for shared(R1, R2, B_x, B_y) private(index,i,j,in_prod)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            in_prod = R1[index]*B_x[index] + R2[index]*B_y[index];   /* calculate inner product */
            R1[index] = R1[index] - in_prod*B_x[index];
            R2[index] = R2[index] - in_prod*B_y[index];
        }}
    return 1;
}

/* Objective step: D = A - lambda * div(R1, R2), with the divergence taken
 * as backward differences (zero at the near edges). Returns D[0]. */
float Obj_dfunc2D(float *A, float *D, float *R1, float *R2, float lambda, long dimX, long dimY)
{
    float val1, val2;
    long i,j,index;
#pragma omp parallel for shared(A,D,R1,R2) private(index,i,j,val1,val2)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == 0) {val1 = 0.0f;} else {val1 = R1[j*dimX + (i-1)];}
            if (j == 0) {val2 = 0.0f;} else {val2 = R2[(j-1)*dimX + i];}
            D[index] = A[index] - lambda*(R1[index] + R2[index] - val1 - val2);
        }}
    return *D;
}

/* Dual gradient step: P = R + (1/(8*lambda)) * grad(D), with grad(D)
 * projected onto the orthogonal complement of (B_x, B_y). */
float Grad_dfunc2D(float *P1, float *P2, float *D, float *R1, float *R2, float *B_x, float *B_y, float lambda, long dimX, long dimY)
{
    float val1, val2, multip, in_prod;
    long i,j,index;
    multip = (1.0f/(8.0f*lambda));  /* 1/(8*lambda): standard FGP step size in 2D */
#pragma omp parallel for shared(P1,P2,D,R1,R2,B_x,B_y,multip) private(i,j,index,val1,val2,in_prod)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[j*dimX + (i+1)];
            if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(j+1)*dimX + i];
            in_prod = val1*B_x[index] + val2*B_y[index];   /* calculate inner product */
            val1 = val1 - in_prod*B_x[index];
            val2 = val2 - in_prod*B_y[index];
            P1[index] = R1[index] + multip*val1;
            P2[index] = R2[index] + multip*val2;
        }}
    return 1;
}

/* FISTA extrapolation: R = P + ((tk-1)/tkp1) * (P - P_old), per component. */
float Rupd_dfunc2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, long DimTotal)
{
    long i;
    float multip;
    multip = ((tk-1.0f)/tkp1);
#pragma omp parallel for shared(P1,P2,P1_old,P2_old,R1,R2,multip) private(i)
    for(i=0; i<DimTotal; i++) {
        R1[i] = P1[i] + multip*(P1[i] - P1_old[i]);
        R2[i] = P2[i] + multip*(P2[i] - P2_old[i]);
    }
    return 1;
}
/********************************************************************/ /***************************3D Functions*****************************/ /********************************************************************/ float GradNorm_func3D(float *B, float *B_x, float *B_y, float *B_z, float eta, long dimX, long dimY, long dimZ) { long i, j, k, index; float val1, val2, val3, gradX, gradY, gradZ, magn; float eta_sq = eta*eta; #pragma omp parallel for shared(B, B_x, B_y, B_z) private(i,j,k,index,val1,val2,val3,gradX,gradY,gradZ,magn) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; /* zero boundary conditions */ if (i == dimX-1) {val1 = 0.0f;} else {val1 = B[(dimX*dimY)*k + j*dimX+(i+1)];} if (j == dimY-1) {val2 = 0.0f;} else {val2 = B[(dimX*dimY)*k + (j+1)*dimX+i];} if (k == dimZ-1) {val3 = 0.0f;} else {val3 = B[(dimX*dimY)*(k+1) + (j)*dimX+i];} gradX = val1 - B[index]; gradY = val2 - B[index]; gradZ = val3 - B[index]; magn = gradX*gradX + gradY*gradY + gradZ*gradZ; magn = sqrt(magn + eta_sq); /* the eta-smoothed gradients magnitude */ B_x[index] = gradX/magn; B_y[index] = gradY/magn; B_z[index] = gradZ/magn; }}} return 1; } float ProjectVect_func3D(float *R1, float *R2, float *R3, float *B_x, float *B_y, float *B_z, long dimX, long dimY, long dimZ) { long i,j,k,index; float in_prod; #pragma omp parallel for shared(R1, R2, R3, B_x, B_y, B_z) private(index,i,j,k,in_prod) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; in_prod = R1[index]*B_x[index] + R2[index]*B_y[index] + R3[index]*B_z[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*B_x[index]; R2[index] = R2[index] - in_prod*B_y[index]; R3[index] = R3[index] - in_prod*B_z[index]; }}} return 1; } float Obj_dfunc3D(float *A, float *D, float *R1, float *R2, float *R3, float lambda, long dimX, long dimY, long dimZ) { float val1, val2, val3; long i,j,k,index; #pragma omp parallel for 
shared(A,D,R1,R2,R3) private(index,i,j,k,val1,val2,val3) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; /* boundary conditions */ if (i == 0) {val1 = 0.0f;} else {val1 = R1[(dimX*dimY)*k + j*dimX + (i-1)];} if (j == 0) {val2 = 0.0f;} else {val2 = R2[(dimX*dimY)*k + (j-1)*dimX + i];} if (k == 0) {val3 = 0.0f;} else {val3 = R3[(dimX*dimY)*(k-1) + j*dimX + i];} D[index] = A[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3); }}} return *D; } float Grad_dfunc3D(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *B_x, float *B_y, float *B_z, float lambda, long dimX, long dimY, long dimZ) { float val1, val2, val3, multip, in_prod; long i,j,k, index; multip = (1.0f/(26.0f*lambda)); #pragma omp parallel for shared(P1,P2,P3,D,R1,R2,R3,multip) private(index,i,j,k,val1,val2,val3,in_prod) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; /* boundary conditions */ if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[(dimX*dimY)*k + j*dimX + (i+1)]; if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(dimX*dimY)*k + (j+1)*dimX + i]; if (k == dimZ-1) val3 = 0.0f; else val3 = D[index] - D[(dimX*dimY)*(k+1) + j*dimX + i]; in_prod = val1*B_x[index] + val2*B_y[index] + val3*B_z[index]; /* calculate inner product */ val1 = val1 - in_prod*B_x[index]; val2 = val2 - in_prod*B_y[index]; val3 = val3 - in_prod*B_z[index]; P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; P3[index] = R3[index] + multip*val3; }}} return 1; } float Rupd_dfunc3D(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, long DimTotal) { long i; float multip; multip = ((tk-1.0f)/tkp1); #pragma omp parallel for shared(P1,P2,P3,P1_old,P2_old,P3_old,R1,R2,R3,multip) private(i) for(i=0; i<DimTotal; i++) { R1[i] = P1[i] + multip*(P1[i] - P1_old[i]); 
R2[i] = P2[i] + multip*(P2[i] - P2_old[i]); R3[i] = P3[i] + multip*(P3[i] - P3_old[i]); } return 1; }
mpi_io.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "../splatt_mpi.h"
#include "../io.h"
#include "../timer.h"
#include "../util.h"

#include "../ccp/ccp.h"


/******************************************************************************
 * API FUNCTONS
 *****************************************************************************/

/* TODO(review): CSF loading over MPI is not implemented yet -- this stub only
 * queries the communicator and reports success. */
int splatt_mpi_csf_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_csf ** tensors,
    double const * const options,
    MPI_Comm comm)
{
  sptensor_t * tt = NULL;

  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  return SPLATT_SUCCESS;
}


/* Naively distribute the coordinate tensor in 'fname' over 'comm' and hand
 * ownership of the index/value arrays to the caller (caller frees *vals and
 * each (*inds)[m]). On failure all outputs are zeroed/NULLed. */
int splatt_mpi_coord_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_idx_t * nnz,
    splatt_idx_t *** inds,
    splatt_val_t ** vals,
    double const * const options,
    MPI_Comm comm)
{
  sptensor_t * tt = mpi_simple_distribute(fname, comm);

  if(tt == NULL) {
    *nmodes = 0;
    *nnz = 0;
    *vals = NULL;
    *inds = NULL;
    return SPLATT_ERROR_BADINPUT;
  }

  *nmodes = tt->nmodes;
  *nnz = tt->nnz;

  /* copy to output -- we steal tt's buffers, so only the shell is freed */
  *vals = tt->vals;
  *inds = splatt_malloc(tt->nmodes * sizeof(**inds));
  for(idx_t m=0; m < tt->nmodes; ++m) {
    (*inds)[m] = tt->ind[m];
  }

  free(tt);

  return SPLATT_SUCCESS;
}



/******************************************************************************
 * PRIVATE FUNCTONS
 *****************************************************************************/


/**
* @brief Fill buf with the next 'nnz_to_read' tensor values.
*
* @param fin The file to read from.
* @param buf The sptensor buffer to fill.
* @param offset Per-mode index offset subtracted from each read index
*               (e.g. 1 for 1-indexed files).
* @param nnz_to_read The number of nonzeros to read.
*/
static void p_fill_tt_nnz(
    FILE * fin,
    sptensor_t * const buf,
    idx_t const * const offset,
    idx_t const nnz_to_read)
{
  idx_t const nmodes = buf->nmodes;

  char * ptr = NULL;
  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  idx_t nnzread = 0;
  while(nnzread < nnz_to_read && (read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        idx_t ind = strtoull(ptr, &ptr, 10);
        buf->ind[m][nnzread] = ind - offset[m];
      }
      val_t const v = strtod(ptr, &ptr);
      buf->vals[nnzread++] = v;
    }
  }

  /* FIX: getline() allocates this buffer; it was leaked before (the other
   * line-based readers in this file already free it). */
  free(line);
}


/* Distribute the fine-grained partition file 'pfname': root streams
 * 'target_nnz'-sized chunks of rank IDs to each other rank, then reads its
 * own (possibly larger) chunk. Returns a malloc'd array of one owner rank
 * per local nonzero; caller frees. */
static int * p_distribute_parts(
    sptensor_t * const ttbuf,
    char const * const pfname,
    rank_info * const rinfo)
{
  /* root may have more than target_nnz */
  idx_t const target_nnz = rinfo->global_nnz / rinfo->npes;
  int * parts = (int *) splatt_malloc(SS_MAX(ttbuf->nnz, target_nnz)
      * sizeof(int));

  if(rinfo->rank == 0) {
    int ret;
    FILE * fin = open_f(pfname, "r");

    /* send to all other ranks */
    for(int p=1; p < rinfo->npes; ++p) {
      /* read into buffer */
      for(idx_t n=0; n < target_nnz; ++n) {
        /* FIX: fscanf returns EOF (-1) at end-of-file, so '== 0' failed to
         * detect a short file; require exactly one converted item. */
        if((ret = fscanf(fin, "%d", &(parts[n]))) != 1) {
          fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n",
              pfname);
          exit(1);
        }
      }
      MPI_Send(parts, target_nnz, MPI_INT, p, 0, rinfo->comm_3d);
    }

    /* now read my own part info */
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      if((ret = fscanf(fin, "%d", &(parts[n]))) != 1) {
        fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n",
            pfname);
        exit(1);
      }
    }
    fclose(fin);
  } else {
    /* receive part info */
    MPI_Recv(parts, ttbuf->nnz, MPI_INT, 0, 0, rinfo->comm_3d,
        &(rinfo->status));
  }

  return parts;
}


/* Compute a 1D (coarse, per-mode) split of every mode's slices so that each
 * rank gets roughly nnz/npes nonzeros; fills rinfo->mat_start/mat_end and
 * resets layer_starts/layer_ends to the full dimension. */
static void p_find_my_slices_1d(
    idx_t ** const ssizes,
    idx_t const nmodes,
    idx_t const nnz,
    rank_info * const rinfo)
{
  idx_t const * const dims = rinfo->global_dims;

  /* find start/end slices for my partition */
  for(idx_t m=0; m < nmodes; ++m) {
    /* current processor */
    int currp = 0;
    idx_t lastn = 0;
    idx_t nnzcnt = 0;
    idx_t pnnz = nnz / rinfo->npes;

    rinfo->layer_starts[m] = 0;
    rinfo->layer_ends[m] = dims[m];
    rinfo->mat_start[m] = 0;
    rinfo->mat_end[m] = dims[m];
    for(idx_t s=0; s < dims[m]; ++s) {
      if(nnzcnt >= lastn + pnnz) {
        /* choose this slice or the previous, whichever is closer */
        if(s > 0) {
          idx_t const thisdist = nnzcnt - (lastn + pnnz);
          idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]);
          if(prevdist < thisdist) {
            lastn = nnzcnt - ssizes[m][s-1];
          } else {
            lastn = nnzcnt;
          }
        } else {
          lastn = nnzcnt;
        }

        ++currp;
        /* adjust target nnz based on what is left */
        pnnz = (nnz - lastn) / SS_MAX(1, rinfo->npes - currp);
        if(currp == rinfo->rank) {
          rinfo->mat_start[m] = s;
        } else if(currp == rinfo->rank+1 && currp != rinfo->npes) {
          /* only set mat_end if we aren't at the end of the tensor */
          rinfo->mat_end[m] = s;
          break;
        }
      }
      nnzcnt += ssizes[m][s];
      if(rinfo->rank == rinfo->npes-1) {
        assert(rinfo->mat_end[m] == rinfo->global_dims[m]);
      }
    }

    /* it is possible to have a very small dimension and too many ranks */
    if(rinfo->npes > 1 && rinfo->mat_start[m] == 0
        && rinfo->mat_end[m] == dims[m]) {
      fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\
          SPLATT_PF_IDX".\n", rinfo->rank, m+1);
      rinfo->mat_start[m] = dims[m];
      rinfo->mat_end[m] = dims[m];
    }
  }
}


/**
* @brief Count the nonzero values in a partition of X.
*
* @param fname The name of the file containing X.
* @param nmodes The number of modes of X.
*
* @return The number of nonzeros in the intersection of all sstarts and sends.
*/ static idx_t p_count_my_nnz_1d( char const * const fname, idx_t const nmodes, idx_t const * const sstarts, idx_t const * const sends) { FILE * fin = open_f(fname, "r"); char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; /* count nnz in my partition */ idx_t mynnz = 0; while((read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; /* I own the nnz if it falls in any of my slices */ if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; break; } } if(mine) { ++mynnz; } /* skip over tensor val */ strtod(ptr, &ptr); } } fclose(fin); free(line); return mynnz; } /** * @brief Read a partition of X into tt. * * @param fname The file containing X. * @param tt The tensor structure (must be pre-allocated). * @param sstarts Array of starting slices, inclusive (one for each mode). * @param sends Array of ending slices, exclusive (one for each mode). */ static void p_read_tt_part_1d( char const * const fname, sptensor_t * const tt, idx_t const * const sstarts, idx_t const * const sends) { idx_t const nnz = tt->nnz; idx_t const nmodes = tt->nmodes; char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; FILE * fin = open_f(fname, "r"); idx_t nnzread = 0; while(nnzread < nnz && (read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; tt->ind[m][nnzread] = ind; if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; } } tt->vals[nnzread] = strtod(ptr, &ptr); if(mine) { ++nnzread; } } } fclose(fin); free(line); } /** * @brief Read my portion of X from a file. * * @param fname The file containing X. * @param ssizes The nonzero counts in each slice. * @param nmodes The number of modes in X. * @param rinfo MPI information (nnz, 3D comm, etc.). 
* * @return My portion of the sparse tensor read from fname. */ static sptensor_t * p_rearrange_coarse( sptensor_t * const ttbuf, idx_t ** const ssizes, rank_info * const rinfo) { int const rank = rinfo->rank; int const npes = rinfo->npes; idx_t const nnz = rinfo->global_nnz; idx_t const * const dims = rinfo->global_dims; idx_t const nmodes = ttbuf->nmodes; idx_t const maxdim = dims[argmax_elem(dims, nmodes)]; idx_t * weights = splatt_malloc(maxdim * sizeof(*weights)); idx_t * coarse_parts[MAX_NMODES]; /* find start/end slices for my partition */ for(idx_t m=0; m < nmodes; ++m) { /* copy ssizes[m] because a prefix sum will be performed */ memcpy(weights, ssizes[m], dims[m] * sizeof(*weights)); /* find a balanced partitioning of slices */ coarse_parts[m] = splatt_malloc((npes+1) * sizeof(**coarse_parts)); partition_1d(weights, dims[m], coarse_parts[m], npes); rinfo->layer_starts[m] = 0; rinfo->layer_ends[m] = dims[m]; /* store partition information */ rinfo->mat_start[m] = coarse_parts[m][rank]; rinfo->mat_end[m] = coarse_parts[m][rank+1]; } splatt_free(weights); int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts)); /* we repeatedly merge into this */ sptensor_t * ret = tt_alloc(0, nmodes); for(idx_t m=0; m < nmodes; ++m) { /* determine owners of all my nnz */ #pragma omp parallel for schedule(static) for(idx_t n=0; n < ttbuf->nnz; ++n) { idx_t const idx = ttbuf->ind[m][n]; for(int p=0; p < npes; ++p) { if(idx >= coarse_parts[m][p] && idx < coarse_parts[m][p+1]) { parts[n] = p; break; } } } sptensor_t * tt_mode = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); #ifdef SPLATT_DEBUG /* sanity check on nnz -- this can be expensive */ assert(tt_remove_dups(tt_mode) == 0); idx_t totnnz; MPI_Reduce(&(tt_mode->nnz), &totnnz, 1, SPLATT_MPI_IDX, MPI_SUM, 0, rinfo->comm_3d); if(rank == 0) { assert(totnnz == rinfo->global_nnz); } #endif /* save the new unioned tensor and clean up */ sptensor_t * tt_merged = tt_union(ret, tt_mode); #ifdef SPLATT_DEBUG 
assert(tt_remove_dups(tt_merged) == 0); #endif tt_free(ret); ret = tt_merged; tt_free(tt_mode); } splatt_free(parts); for(idx_t m=0; m < nmodes; ++m) { splatt_free(coarse_parts[m]); } return ret; } /** * @brief Find the boundaries for a process layer. * * @param ssizes The number of nonzeros found in each index (of each mode). * ssizes[1][5] is the number of nonzeros in X(:,5,:). * @param mode Which mode to work on. * @param rinfo MPI rank information. */ static void p_find_layer_boundaries( idx_t ** const ssizes, idx_t const mode, rank_info * const rinfo) { idx_t const * const dims = rinfo->global_dims; idx_t const nnz = rinfo->global_nnz; idx_t const m = mode; /* find start/end slices for my partition */ int const layer_dim = rinfo->dims_3d[m]; idx_t pnnz = nnz / layer_dim; /* nnz in a layer */ /* current processor */ int currp = 0; idx_t lastn = 0; idx_t nnzcnt = ssizes[m][0]; /* initialize layer_ptrs */ rinfo->layer_ptrs[m] = splatt_malloc((layer_dim+1) * sizeof(**(rinfo->layer_ptrs))); rinfo->layer_ptrs[m][currp++] = 0; rinfo->layer_ptrs[m][layer_dim] = dims[m]; if(layer_dim == 1) { goto CLEANUP; return; } /* foreach slice */ for(idx_t s=1; s < dims[m]; ++s) { /* if we have passed the next layer boundary */ if(nnzcnt >= lastn + pnnz) { /* choose this slice or the previous, whichever is closer */ idx_t const thisdist = nnzcnt - (lastn + pnnz); idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]); if(prevdist < thisdist) { lastn = nnzcnt - ssizes[m][s-1]; /* see below comment */ //rinfo->layer_ptrs[m][currp++] = s-1; } else { lastn = nnzcnt; //rinfo->layer_ptrs[m][currp++] = s; } /* Always choosing s but marking lastn with s-1 leads to better balance * and communication volume. This is totally a heuristic. 
*/ rinfo->layer_ptrs[m][currp++] = s; /* exit early if we placed the last rank */ if(currp == layer_dim) { break; } /* adjust target nnz based on what is left */ pnnz = (nnz - lastn) / SS_MAX(1, layer_dim - (currp-1)); } nnzcnt += ssizes[m][s]; } for( ; currp < layer_dim; ++currp) { rinfo->layer_ptrs[m][currp] = dims[m]; } CLEANUP: /* store layer bounderies in layer_{starts, ends} */ rinfo->layer_starts[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m]]; rinfo->layer_ends[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m] + 1]; /* it is possible to have a very small dimension and too many ranks */ if(rinfo->dims_3d[m] > 1 && rinfo->layer_ends[m] - rinfo->layer_starts[m] == dims[m]) { fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\ SPLATT_PF_IDX".\n", rinfo->rank, m+1); rinfo->layer_starts[m] = dims[m]; rinfo->layer_ends[m] = dims[m]; } } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. * * @return My owned tensor nonzeros. */ static sptensor_t * p_rearrange_medium( sptensor_t * const ttbuf, idx_t * * ssizes, rank_info * const rinfo) { #pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < ttbuf->nmodes; ++m) { p_find_layer_boundaries(ssizes, m, rinfo); } /* create partitioning */ int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts)); #pragma omp parallel for schedule(static) for(idx_t n=0; n < ttbuf->nnz; ++n) { parts[n] = mpi_determine_med_owner(ttbuf, n, rinfo); } sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); splatt_free(parts); return tt; } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. * @param pfname The filename containing the partitioning information. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. 
*
* @return My owned tensor nonzeros.
*/
static sptensor_t * p_rearrange_fine(
    sptensor_t * const ttbuf,
    char const * const pfname,
    idx_t * * ssizes,
    rank_info * const rinfo)
{
  /* first distribute partitioning information */
  int * parts = p_distribute_parts(ttbuf, pfname, rinfo);

  /* exchange nonzeros so each rank holds exactly the ones assigned to it */
  sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d);

  free(parts);
  return tt;
}


/**
* @brief Count the nonzeros in each slice of X.
*
* @param tt My subtensor.
* @param ssizes A 2D array for counting slice 'sizes'.
* @param rinfo MPI information (containing global dims, nnz, etc.).
*/
static void p_fill_ssizes(
    sptensor_t const * const tt,
    idx_t ** const ssizes,
    rank_info const * const rinfo)
{
  #pragma omp parallel
  {
    for(idx_t m=0; m < tt->nmodes; ++m) {
      idx_t const * const ind = tt->ind[m];

      /* histogram local nonzeros into ssizes[m]; the implicit barrier at the
       * end of this worksharing loop guarantees all increments complete
       * before the master's reduction below */
      #pragma omp for schedule(static)
      for(idx_t n=0; n < tt->nnz; ++n) {
        #pragma omp atomic
        ssizes[m][ind[n]] += 1;
      }

      /* reduce to get total slice counts */
      /* NOTE(review): 'omp master' has no barrier, but other threads only
       * proceed to the next mode's array and wait at that loop's barrier --
       * presumably safe; confirm before reordering anything here */
      #pragma omp master
      MPI_Allreduce(MPI_IN_PLACE, ssizes[m], (int) rinfo->global_dims[m],
          SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d);
    }
  } /* omp parallel */
}


/**
* @brief Fill in the best MPI dimensions we can find. The truly optimal
*        solution should involve the tensor's sparsity pattern, but in general
*        this works as good (but usually better) than the hand-tuned dimensions
*        that we tried.
*
* @param rinfo MPI rank information.
*/
static void p_get_best_mpi_dim(
    rank_info * const rinfo)
{
  int nprimes = 0;
  int * primes = get_primes(rinfo->npes, &nprimes);

  idx_t total_size = 0;
  for(idx_t m=0; m < rinfo->nmodes; ++m) {
    total_size += rinfo->global_dims[m];

    /* reset mpi dims */
    rinfo->dims_3d[m] = 1;
  }
  idx_t target = total_size / (idx_t)rinfo->npes;

  idx_t diffs[MAX_NMODES];

  /* start from the largest prime */
  for(int p = nprimes-1; p >= 0; --p) {
    int furthest = 0;
    /* find dim furthest from target */
    for(idx_t m=0; m < rinfo->nmodes; ++m) {
      /* distance is current - target */
      idx_t const curr = rinfo->global_dims[m] / rinfo->dims_3d[m];
      /* avoid underflow */
      diffs[m] = (curr > target) ? (curr - target) : 0;

      if(diffs[m] > diffs[furthest]) {
        furthest = m;
      }
    }

    /* assign this prime factor to the furthest mode (comment fixed: it is
     * primes[p] processes, not 'p' processes) */
    rinfo->dims_3d[furthest] *= primes[p];
  }

  splatt_free(primes);
}


/**
* @brief Read a sparse tensor in coordinate form from a text file and
*        and distribute among MPI ranks.
*
* @param fin The file to read from.
* @param comm The MPI communicator to distribute among.
*
* @return The sparse tensor.
*/
static sptensor_t * p_tt_mpi_read_file(
    FILE * fin,
    MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  idx_t dims[MAX_NMODES];
  idx_t offsets[MAX_NMODES];
  idx_t global_nnz;
  idx_t nmodes;

  sptensor_t * tt = NULL;

  /* NOTE(review): both branches issue identical Bcasts and could be
   * collapsed; dims/offsets stay valid on rank 0 only, which is fine since
   * only rank 0 reads the file below */
  if(rank == 0) {
    /* send dimension info */
    tt_get_dims(fin, &nmodes, &global_nnz, dims, offsets);
    rewind(fin);
    MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);
    MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);
  } else {
    MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);
    MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);
  }

  /* compute my even chunk of nonzeros -- root rank gets the extra amount */
  idx_t const target_nnz = global_nnz / npes;
  idx_t my_nnz = target_nnz;
  if(rank == 0) {
    my_nnz = global_nnz - ((npes-1) * my_nnz);
  }

  /* read/send all chunks */
  if(rank == 0) {
    sptensor_t * tt_buf = tt_alloc(target_nnz, nmodes);

    /* now send to everyone else */
    for(int p=1; p < npes; ++p) {
      p_fill_tt_nnz(fin, tt_buf, offsets, target_nnz);
      for(idx_t m=0; m < tt_buf->nmodes; ++m) {
        MPI_Send(tt_buf->ind[m], target_nnz, SPLATT_MPI_IDX, p, m, comm);
      }
      MPI_Send(tt_buf->vals, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm);
    }
    tt_free(tt_buf);

    /* load my own -- root's chunk is the remainder at the end of the file */
    tt = tt_alloc(my_nnz, nmodes);
    p_fill_tt_nnz(fin, tt, offsets, my_nnz);
  } else {
    MPI_Status status;

    /* receive my chunk */
    tt = tt_alloc(my_nnz, nmodes);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status);
    }
    MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status);
  }

  return tt;
}


/**
* @brief Read a sparse tensor in coordinate form from a binary file and
*        distribute among MPI ranks.
*
* @param fin The file to read from.
* @param comm The MPI communicator to distribute among.
*
* @return The sparse tensor.
*/
static sptensor_t * p_tt_mpi_read_binary_file(
  FILE * fin,
  MPI_Comm comm)
{
  sptensor_t * tt = NULL;

  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  idx_t global_nnz;
  idx_t nmodes;
  idx_t dims[MAX_NMODES];

  /* get header and tensor stats */
  bin_header header;
  if(rank == 0) {
    read_binary_header(fin, &header);
    fill_binary_idx(&nmodes, 1, &header, fin);
    /* NOTE(review): if the file reports nmodes > MAX_NMODES, this fill
     * overflows dims[] BEFORE the sanity check below runs -- consider
     * validating nmodes first. */
    fill_binary_idx(dims, nmodes, &header, fin);
    fill_binary_idx(&global_nnz, 1, &header, fin);
  }

  /* send dimension info -- identical collective calls on every rank */
  if(rank == 0) {
    MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);
    MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);
  } else {
    MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm);
    MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm);
  }

  /* sanity check -- all ranks see the broadcast value and return together */
  if(nmodes > MAX_NMODES) {
    if(rank == 0) {
      fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                      "Found %"SPLATT_PF_IDX". Please recompile with "
                      "MAX_NMODES=%"SPLATT_PF_IDX".\n",
          MAX_NMODES, nmodes, nmodes);
    }
    return NULL;
  }

  /* compute my even chunk of nonzeros -- root rank gets the extra amount */
  idx_t const target_nnz = global_nnz / npes;
  idx_t my_nnz = target_nnz;
  if(rank == 0) {
    my_nnz = global_nnz - ((npes-1)* target_nnz);
  }

  tt = tt_alloc(my_nnz, nmodes);

  /* read/send all chunks */
  if(rank == 0) {
    /* handle inds -- stream one (mode, destination rank) chunk at a time
     * through a single reusable buffer */
    idx_t * ibuf = splatt_malloc(target_nnz * sizeof(idx_t));
    for(idx_t m=0; m < nmodes; ++m) {
      for(int p=1; p < npes; ++p) {
        fill_binary_idx(ibuf, target_nnz, &header, fin);
        MPI_Send(ibuf, target_nnz, SPLATT_MPI_IDX, p, m, comm);
      }
      /* load my own */
      fill_binary_idx(tt->ind[m], my_nnz, &header, fin);
    }
    splatt_free(ibuf);

    /* now vals */
    val_t * vbuf = splatt_malloc(target_nnz * sizeof(val_t));
    for(int p=1; p < npes; ++p) {
      fill_binary_val(vbuf, target_nnz, &header, fin);
      MPI_Send(vbuf, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm);
    }
    splatt_free(vbuf);

    /* finally, load my own vals */
    fill_binary_val(tt->vals, my_nnz, &header, fin);

  } else {
    /* non-root ranks just recv */
    MPI_Status status;

    /* receive my
chunk */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status);
    }
    MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status);
  }

  return tt;
}



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

sptensor_t * mpi_tt_read(
  char const * const ifname,
  char const * const pfname,
  rank_info * const rinfo)
{
  timer_start(&timers[TIMER_IO]);

  /* first just make sure it exists */
  FILE * fin;
  if((fin = fopen(ifname, "r")) == NULL) {
    if(rinfo->rank == 0) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", ifname);
    }
    return NULL;
  }
  fclose(fin);

  /* first naively distribute tensor nonzeros for analysis */
  sptensor_t * ttbuf = mpi_simple_distribute(ifname, MPI_COMM_WORLD);

  /* global stats: total nnz (sum) and per-mode dimensions (max) */
  rinfo->nmodes = ttbuf->nmodes;
  MPI_Allreduce(&(ttbuf->nnz), &(rinfo->global_nnz), 1, SPLATT_MPI_IDX,
      MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(ttbuf->dims, &(rinfo->global_dims), ttbuf->nmodes,
      SPLATT_MPI_IDX, MPI_MAX, MPI_COMM_WORLD);

  /* first compute MPI dimension if not specified by the user */
  if(rinfo->decomp == DEFAULT_MPI_DISTRIBUTION) {
    rinfo->decomp = SPLATT_DECOMP_MEDIUM;
    p_get_best_mpi_dim(rinfo);
  }

  mpi_setup_comms(rinfo);

  /* count # nonzeros found in each index */
  idx_t * ssizes[MAX_NMODES];
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    ssizes[m] = (idx_t *) calloc(rinfo->global_dims[m], sizeof(idx_t));
  }
  p_fill_ssizes(ttbuf, ssizes, rinfo);

  /* actually parse tensor */
  sptensor_t * tt = NULL;
  switch(rinfo->decomp) {
  case SPLATT_DECOMP_COARSE:
    tt = p_rearrange_coarse(ttbuf, ssizes, rinfo);
    tt_fill_dims(tt);
    break;

  case SPLATT_DECOMP_MEDIUM:
    tt = p_rearrange_medium(ttbuf, ssizes, rinfo);

    /* now map tensor indices to local (layer) coordinates and fill in dims */
    #pragma omp parallel
    for(idx_t m=0; m < ttbuf->nmodes; ++m) {
      /* only one thread updates the mode dimension; the shifted loop below
       * does not read tt->dims, so no barrier is required here */
      #pragma omp master
      tt->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m];

      /* all threads cooperatively shift this mode's indices into the layer */
      #pragma omp for schedule(static) nowait
      for(idx_t n=0; n < tt->nnz; ++n) {
        assert(tt->ind[m][n] >= rinfo->layer_starts[m]);
        assert(tt->ind[m][n] < rinfo->layer_ends[m]);
        tt->ind[m][n] -= rinfo->layer_starts[m];
      }
    }
    break;

  case SPLATT_DECOMP_FINE:
    tt = p_rearrange_fine(ttbuf, pfname, ssizes, rinfo);

    /* now fix tt->dims -- fine-grained keeps global coordinates */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      tt->dims[m] = rinfo->global_dims[m];
      rinfo->layer_ends[m] = tt->dims[m];
    }
    break;
  }

  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    free(ssizes[m]);
  }
  tt_free(ttbuf);

  timer_stop(&timers[TIMER_IO]);
  return tt;
}


sptensor_t * mpi_filter_tt_1d(
  sptensor_t const * const tt,
  idx_t const mode,
  idx_t start,
  idx_t end)
{
  sptensor_t * ftt = tt_alloc(tt->nnz, tt->nmodes);
  for(idx_t m=0; m < ftt->nmodes; ++m) {
    ftt->dims[m] = tt->dims[m];
  }

  /* Adjust start and end if tt has been compressed. */
  assert(start < end);
  /* TODO: change this linear search into a binary one */
  if(tt->indmap[mode] != NULL) {
    for(idx_t i=0; i < tt->dims[mode]; ++i) {
      if(tt->indmap[mode][i] == start) {
        start = i;
      }
      if(tt->indmap[mode][i]+1 == end) {
        end = i+1;
        break;
      }
    }
  }

  idx_t nnz = 0;
  for(idx_t n=0; n < tt->nnz; ++n) {
    /* Copy the nonzero if we own the slice. */
    if(tt->ind[mode][n] >= start && tt->ind[mode][n] < end) {
      for(idx_t m=0; m < tt->nmodes; ++m) {
        ftt->ind[m][nnz] = tt->ind[m][n];
      }
      ftt->vals[nnz++] = tt->vals[n];
    }
  }

  /* update ftt dimensions and nnz */
  ftt->nnz = nnz;
  ftt->dims[mode] = end - start;

  /* now map mode coords to [0, end-start) */
  #pragma omp parallel for schedule(static)
  for(idx_t n=0; n < ftt->nnz; ++n) {
    assert(ftt->ind[mode][n] >= start);
    assert(ftt->ind[mode][n] < end);
    ftt->ind[mode][n] -= start;
  }

  /* create new indmap for mode */
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->indmap[m] == NULL) {
      break;
    }
    /* NOTE(review): 'p = realloc(p, ...)' loses the original pointer on
     * failure -- acceptable only if splatt aborts on OOM elsewhere. */
    ftt->indmap[m] = (idx_t *) realloc(ftt->indmap[m],
        ftt->dims[m] * sizeof(idx_t));

    /* mode indices are shifted.
otherwise just copy */
    if(m == mode) {
      #pragma omp parallel for
      for(idx_t i=0; i < ftt->dims[mode]; ++i) {
        ftt->indmap[mode][i] = tt->indmap[mode][i+start];
      }
    } else {
      par_memcpy(ftt->indmap[m], tt->indmap[m], tt->dims[m] * sizeof(idx_t));
    }
  }

  /* sanity check */
  for(idx_t i=0; i < ftt->dims[mode]; ++i) {
    assert(i + start < end);
  }
  for(idx_t n=0; n < ftt->nnz; ++n) {
    assert(ftt->ind[mode][n] < end - start);
  }

  return ftt;
}


void mpi_write_mats(
  matrix_t ** mats,
  permutation_t const * const perm,
  rank_info const * const rinfo,
  char const * const basename,
  idx_t const nmodes)
{
  char * fname;
  idx_t const nfactors = mats[0]->J;

  MPI_Status status;

  idx_t maxdim = 0;
  idx_t maxlocaldim = 0;
  matrix_t * matbuf = NULL;
  val_t * vbuf = NULL;
  idx_t * loc_iperm = NULL;

  for(idx_t m=0; m < nmodes; ++m) {
    maxdim = SS_MAX(maxdim, rinfo->global_dims[m]);
    maxlocaldim = SS_MAX(maxlocaldim, mats[m]->I);
  }

  /* get the largest local dim */
  if(rinfo->rank == 0) {
    MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
      rinfo->comm_3d);
  } else {
    MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
      rinfo->comm_3d);
  }

  if(rinfo->rank == 0) {
    /* root-only buffers sized for the largest mode */
    matbuf = mat_alloc(maxdim, nfactors);
    loc_iperm = (idx_t *) splatt_malloc(maxdim * sizeof(idx_t));
    vbuf = (val_t *) splatt_malloc(maxdim * nfactors * sizeof(val_t));
  }

  for(idx_t m=0; m < nmodes; ++m) {
    /* root handles the writing */
    if(rinfo->rank == 0) {
      asprintf(&fname, "%s%"SPLATT_PF_IDX".mat", basename, m+1);
      matbuf->I = rinfo->global_dims[m];

      /* copy root's matrix to buffer, un-permuting to global row order */
      for(idx_t i=0; i < mats[m]->I; ++i) {
        idx_t const gi = rinfo->layer_starts[m] + perm->iperms[m][i];
        for(idx_t f=0; f < nfactors; ++f) {
          matbuf->vals[f + (gi*nfactors)] = mats[m]->vals[f+(i*nfactors)];
        }
      }

      /* receive matrix from each rank */
      for(int p=1; p < rinfo->npes; ++p) {
        idx_t layerstart;
        idx_t nrows;
        MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d,
            &status);
        MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
        MPI_Recv(vbuf,
nrows * nfactors, SPLATT_MPI_VAL, p, 0,
            rinfo->comm_3d, &status);
        MPI_Recv(loc_iperm, nrows, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d,
            &status);

        /* permute buffer and copy into matbuf */
        for(idx_t i=0; i < nrows; ++i) {
          idx_t const gi = layerstart + loc_iperm[i];
          for(idx_t f=0; f < nfactors; ++f) {
            matbuf->vals[f + (gi*nfactors)] = vbuf[f+(i*nfactors)];
          }
        }
      }

      /* write the factor matrix to disk */
      mat_write(matbuf, fname);

      /* clean up */
      free(fname);
    } else {
      /* send matrix (plus layer offset and permutation rows) to root */
      MPI_Send(&(rinfo->layer_starts[m]), 1, SPLATT_MPI_IDX, 0, 0,
          rinfo->comm_3d);
      MPI_Send(&(mats[m]->I), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
      MPI_Send(mats[m]->vals, mats[m]->I * mats[m]->J, SPLATT_MPI_VAL, 0, 0,
          rinfo->comm_3d);
      MPI_Send(perm->iperms[m] + rinfo->mat_start[m], mats[m]->I,
          SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
    }
  } /* foreach mode */

  if(rinfo->rank == 0) {
    mat_free(matbuf);
    free(vbuf);
    free(loc_iperm);
  }
}


void mpi_write_part(
  sptensor_t const * const tt,
  permutation_t const * const perm,
  rank_info const * const rinfo)
{
  /* file name is <rank>.part */
  char name[256];
  sprintf(name, "%d.part", rinfo->rank);
  FILE * fout = open_f(name, "w");

  for(idx_t n=0; n < tt->nnz; ++n) {
    for(idx_t m=0; m < tt->nmodes; ++m) {
      /* map idx to original global coordinate */
      idx_t idx = tt->ind[m][n];
      if(tt->indmap[m] != NULL) {
        idx = tt->indmap[m][idx];
      }
      if(perm->iperms[m] != NULL) {
        idx = perm->iperms[m][idx];
      }

      /* write index (1-indexed on disk) */
      fprintf(fout, "%"SPLATT_PF_IDX" ", 1+idx);
    }
    fprintf(fout, "%"SPLATT_PF_VAL"\n", tt->vals[n]);
  }

  fclose(fout);
}


sptensor_t * mpi_simple_distribute(
  char const * const ifname,
  MPI_Comm comm)
{
  int rank, npes;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &npes);

  sptensor_t * tt = NULL;

  /* only root actually opens the file */
  FILE * fin = NULL;
  if(rank == 0) {
    fin = open_f(ifname, "r");
  }

  /* NOTE(review): an unrecognized file type leaves tt == NULL here */
  switch(get_file_type(ifname)) {
  case SPLATT_FILE_TEXT_COORD:
    tt = p_tt_mpi_read_file(fin, comm);
    break;
  case SPLATT_FILE_BIN_COORD:
    tt = p_tt_mpi_read_binary_file(fin, comm);
    break;
  }

  if(rank == 0) {
    fclose(fin);
  }
tt_fill_dims(tt); return tt; } matrix_t * mpi_mat_rand( idx_t const mode, idx_t const nfactors, permutation_t const * const perm, rank_info * const rinfo) { idx_t const localdim = rinfo->mat_end[mode] - rinfo->mat_start[mode]; matrix_t * mymat = mat_alloc(localdim, nfactors); MPI_Status status; /* figure out buffer sizes */ idx_t maxlocaldim = localdim; if(rinfo->rank == 0) { MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } else { MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } /* root rank does the heavy lifting */ if(rinfo->rank == 0) { /* allocate buffers */ idx_t * loc_perm = splatt_malloc(maxlocaldim * sizeof(*loc_perm)); val_t * vbuf = splatt_malloc(maxlocaldim * nfactors * sizeof(*vbuf)); /* allocate initial factor */ matrix_t * full_factor = mat_rand(rinfo->global_dims[mode], nfactors); /* copy root's own matrix to output */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < localdim; ++i) { idx_t const gi = rinfo->mat_start[mode] + perm->iperms[mode][i]; for(idx_t f=0; f < nfactors; ++f) { mymat->vals[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)]; } } /* communicate! 
*/ for(int p=1; p < rinfo->npes; ++p) { /* first receive layer start and permutation info */ idx_t layerstart; idx_t nrows; MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 1, rinfo->comm_3d, &status); MPI_Recv(loc_perm, nrows, SPLATT_MPI_IDX, p, 2, rinfo->comm_3d, &status); /* fill buffer */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < nrows; ++i) { idx_t const gi = layerstart + loc_perm[i]; for(idx_t f=0; f < nfactors; ++f) { vbuf[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)]; } } /* send to rank p */ MPI_Send(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 3, rinfo->comm_3d); } mat_free(full_factor); splatt_free(loc_perm); splatt_free(vbuf); /* other ranks just send/recv */ } else { /* send permutation info to root */ MPI_Send(&(rinfo->layer_starts[mode]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(&localdim, 1, SPLATT_MPI_IDX, 0, 1, rinfo->comm_3d); MPI_Send(perm->iperms[mode] + rinfo->mat_start[mode], localdim, SPLATT_MPI_IDX, 0, 2, rinfo->comm_3d); /* receive factor */ MPI_Recv(mymat->vals, mymat->I * mymat->J, SPLATT_MPI_VAL, 0, 3, rinfo->comm_3d, &status); } return mymat; } sptensor_t * mpi_rearrange_by_part( sptensor_t const * const ttbuf, int const * const parts, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); /* count how many to send to each process */ int * nsend = calloc(npes, sizeof(*nsend)); int * nrecv = calloc(npes, sizeof(*nrecv)); for(idx_t n=0; n < ttbuf->nnz; ++n) { nsend[parts[n]] += 1; } MPI_Alltoall(nsend, 1, MPI_INT, nrecv, 1, MPI_INT, comm); idx_t send_total = 0; idx_t recv_total = 0; for(int p=0; p < npes; ++p) { send_total += nsend[p]; recv_total += nrecv[p]; } assert(send_total = ttbuf->nnz); /* how many nonzeros I'll own */ idx_t const nowned = recv_total; int * send_disp = splatt_malloc((npes+1) * sizeof(*send_disp)); int * recv_disp = splatt_malloc((npes+1) * sizeof(*recv_disp)); /* recv_disp is const 
so we'll just fill it out once */
  recv_disp[0] = 0;
  for(int p=1; p <= npes; ++p) {
    recv_disp[p] = recv_disp[p-1] + nrecv[p-1];
  }

  /* allocate my tensor and send buffer */
  sptensor_t * tt = tt_alloc(nowned, ttbuf->nmodes);
  idx_t * isend_buf = splatt_malloc(ttbuf->nnz * sizeof(*isend_buf));

  /* rearrange into sendbuf and send one mode at a time */
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    /* prefix sum to make disps; send_disp[p+1] doubles as the running write
     * cursor for destination p while scattering, and after the loop below
     * each send_disp[p] holds p's start offset, as Alltoallv expects */
    send_disp[0] = send_disp[1] = 0;
    for(int p=2; p <= npes; ++p) {
      send_disp[p] = send_disp[p-1] + nsend[p-2];
    }

    idx_t const * const ind = ttbuf->ind[m];
    for(idx_t n=0; n < ttbuf->nnz; ++n) {
      idx_t const index = send_disp[parts[n]+1]++;
      isend_buf[index] = ind[n];
    }

    /* exchange indices */
    MPI_Alltoallv(isend_buf, nsend, send_disp, SPLATT_MPI_IDX,
                  tt->ind[m], nrecv, recv_disp, SPLATT_MPI_IDX, comm);
  }
  splatt_free(isend_buf);

  /* lastly, rearrange vals -- same cursor trick as above */
  val_t * vsend_buf = splatt_malloc(ttbuf->nnz * sizeof(*vsend_buf));
  send_disp[0] = send_disp[1] = 0;
  for(int p=2; p <= npes; ++p) {
    send_disp[p] = send_disp[p-1] + nsend[p-2];
  }

  val_t const * const vals = ttbuf->vals;
  for(idx_t n=0; n < ttbuf->nnz; ++n) {
    idx_t const index = send_disp[parts[n]+1]++;
    vsend_buf[index] = vals[n];
  }

  /* exchange vals */
  MPI_Alltoallv(vsend_buf, nsend, send_disp, SPLATT_MPI_VAL,
                tt->vals, nrecv, recv_disp, SPLATT_MPI_VAL, comm);
  splatt_free(vsend_buf);

  splatt_free(send_disp);
  splatt_free(recv_disp);
  /* allocated with calloc */
  free(nsend);
  free(nrecv);

  /* fill dim info */
  tt_fill_dims(tt);
  return tt;
}


int mpi_determine_med_owner(
  sptensor_t * const ttbuf,
  idx_t const n,
  rank_info * const rinfo)
{
  int coords[MAX_NMODES];

  assert(rinfo->decomp == SPLATT_DECOMP_MEDIUM);

  /* determine the coordinates of the owner rank */
  for(idx_t m=0; m < ttbuf->nmodes; ++m) {
    idx_t const id = ttbuf->ind[m][n];

    /* silly linear scan over each layer.
     * TODO: do a binary search */
    for(int l=1; l <= rinfo->dims_3d[m]; ++l) {
      if(id < rinfo->layer_ptrs[m][l]) {
        coords[m] = l-1;
        break;
      }
    }
    /* NOTE(review): coords[m] stays uninitialized if id is not below any
     * layer pointer -- presumably layer_ptrs[m][dims_3d[m]] spans the whole
     * dimension so the break always fires; verify. */
  }

  /* translate that to an MPI rank */
  int owner;
  MPI_Cart_rank(rinfo->comm_3d, coords, &owner);
  return owner;
}
_vet.c
/* Generated by Cython 0.29.13 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [ "/usr/lib/python3/dist-packages/numpy/core/include/numpy/arrayobject.h", "/usr/lib/python3/dist-packages/numpy/core/include/numpy/ufuncobject.h" ], "extra_compile_args": [ "-fopenmp" ], "extra_link_args": [ "-fopenmp" ], "include_dirs": [ "/usr/lib/python3/dist-packages/numpy/core/include" ], "language": "c", "name": "pysteps.motion._vet", "sources": [ "pysteps/motion/_vet.pyx" ] }, "module_name": "pysteps.motion._vet" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_13" #define CYTHON_HEX_VERSION 0x001D0DF0 #define CYTHON_FUTURE_DIVISION 1 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 
#endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define 
CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 
(PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define 
CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef 
Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif 
PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef 
PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__pysteps__motion___vet #define __PYX_HAVE_API__pysteps__motion___vet /* Early includes */ #include <string.h> #include <stdio.h> #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if 
defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) 
__Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; 
PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "pysteps/motion/_vet.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 
0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 
uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":802 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* 
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":806 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":808 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":811 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":813 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "pysteps/motion/_vet.pyx":13 * cimport numpy as np * * ctypedef np.float64_t float64 # <<<<<<<<<<<<<< * ctypedef np.int8_t int8 * ctypedef np.intp_t 
intp */ typedef __pyx_t_5numpy_float64_t __pyx_t_7pysteps_6motion_4_vet_float64; /* "pysteps/motion/_vet.pyx":14 * * ctypedef np.float64_t float64 * ctypedef np.int8_t int8 # <<<<<<<<<<<<<< * ctypedef np.intp_t intp * */ typedef __pyx_t_5numpy_int8_t __pyx_t_7pysteps_6motion_4_vet_int8; /* "pysteps/motion/_vet.pyx":15 * ctypedef np.float64_t float64 * ctypedef np.int8_t int8 * ctypedef np.intp_t intp # <<<<<<<<<<<<<< * * from libc.math cimport floor, round */ typedef __pyx_t_5numpy_intp_t __pyx_t_7pysteps_6motion_4_vet_intp; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":816 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":817 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble 
clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) 
do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ 
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE 
PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); #define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * 
s0 + i1 * s1) /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void 
__Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); 
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); #define __Pyx_PyObject_Dict_GetItem(obj, name)\ (likely(PyDict_CheckExact(obj)) ?\ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) #endif /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static 
_PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CLineInTraceback.proto */ #ifdef 
CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || 
__GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define 
__Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *); /* CIntFromPy.proto */ static 
CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cython' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char 
*__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'pysteps.motion._vet' */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64 = { "float64", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_float64), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8 = { "int8", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_int8), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp = { "intp", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_intp), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp), 0 }; #define __Pyx_MODULE_NAME "pysteps.motion._vet" extern int __pyx_module_is_main_pysteps__motion___vet; int __pyx_module_is_main_pysteps__motion___vet = 0; /* Implementation of 'pysteps.motion._vet' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_zip; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_l[] = "l"; static const char __pyx_k_m[] = "m"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_l0[] = "l0"; static const char __pyx_k_l1[] = "l1"; static const char __pyx_k_ll[] = "ll"; static const char __pyx_k_m0[] = "m0"; static const char __pyx_k_m1[] = "m1"; static const char __pyx_k_mm[] = "mm"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_xy[] = "xy"; static const char __pyx_k_f00[] = "f00"; static const char __pyx_k_f01[] = "f01"; static const char __pyx_k_f10[] = "f10"; static const char __pyx_k_f11[] = "f11"; static const char __pyx_k_l_i[] = "l_i"; static const char __pyx_k_m_j[] = "m_j"; static const char __pyx_k_sum[] = "sum"; static const char __pyx_k_zip[] = "zip"; static const char __pyx_k_axis[] = "axis"; static const char __pyx_k_full[] = "full"; static const char __pyx_k_int8[] = "int8"; static const char __pyx_k_intp[] = "intp"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mask[] = "mask"; static const char __pyx_k_mean[] = "mean"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_warp[] = "_warp"; static const char __pyx_k_dtype[] = "dtype"; static const char 
__pyx_k_i_max[] = "i_max"; static const char __pyx_k_i_min[] = "i_min"; static const char __pyx_k_i_sec[] = "i_sec"; static const char __pyx_k_image[] = "image"; static const char __pyx_k_j_max[] = "j_max"; static const char __pyx_k_j_min[] = "j_min"; static const char __pyx_k_j_sec[] = "j_sec"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_arange[] = "arange"; static const char __pyx_k_buffer[] = "buffer"; static const char __pyx_k_counts[] = "counts"; static const char __pyx_k_df_dx2[] = "df_dx2"; static const char __pyx_k_df_dy2[] = "df_dy2"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_unique[] = "unique"; static const char __pyx_k_x_ceil[] = "x_ceil"; static const char __pyx_k_y_ceil[] = "y_ceil"; static const char __pyx_k_df_dxdy[] = "df_dxdy"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_i_shift[] = "i_shift"; static const char __pyx_k_j_shift[] = "j_shift"; static const char __pyx_k_reshape[] = "reshape"; static const char __pyx_k_x_float[] = "x_float"; static const char __pyx_k_x_floor[] = "x_floor"; static const char __pyx_k_x_guess[] = "x_guess"; static const char __pyx_k_y_float[] = "y_float"; static const char __pyx_k_y_floor[] = "y_floor"; static const char __pyx_k_y_guess[] = "y_guess"; static const char __pyx_k_gradient[] = "gradient"; static const char __pyx_k_new_image[] = "new_image"; static const char __pyx_k_residuals[] = "residuals"; static const char __pyx_k_x_max_int[] = "x_max_int"; static const char __pyx_k_x_sectors[] = "x_sectors"; static const char __pyx_k_y_max_int[] = "y_max_int"; static const char __pyx_k_y_sectors[] = "y_sectors"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_morph_mask[] = "morph_mask"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_grad_smooth[] = "grad_smooth"; static 
const char __pyx_k_input_image[] = "input_image"; static const char __pyx_k_interp_coef[] = "interp_coef"; static const char __pyx_k_sector_area[] = "sector_area"; static const char __pyx_k_smooth_gain[] = "smooth_gain"; static const char __pyx_k_x_max_float[] = "x_max_float"; static const char __pyx_k_y_max_float[] = "y_max_float"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_displacement[] = "displacement"; static const char __pyx_k_morphed_mask[] = "morphed_mask"; static const char __pyx_k_return_index[] = "return_index"; static const char __pyx_k_x_image_size[] = "x_image_size"; static const char __pyx_k_y_image_size[] = "y_image_size"; static const char __pyx_k_cost_function[] = "_cost_function"; static const char __pyx_k_gradient_data[] = "_gradient_data"; static const char __pyx_k_morphed_image[] = "morphed_image"; static const char __pyx_k_return_counts[] = "return_counts"; static const char __pyx_k_x_sector_size[] = "x_sector_size"; static const char __pyx_k_y_sector_size[] = "y_sector_size"; static const char __pyx_k_grad_residuals[] = "grad_residuals"; static const char __pyx_k_template_image[] = "template_image"; static const char __pyx_k_gradient_values[] = "gradient_values"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_smoothness_penalty[] = "smoothness_penalty"; static const char __pyx_k_pysteps_motion__vet[] = "pysteps.motion._vet"; static const char __pyx_k_sector_displacement[] = "sector_displacement"; static const char __pyx_k_pysteps_motion__vet_pyx[] = "pysteps/motion/_vet.pyx"; static const char __pyx_k_inloop_smoothness_penalty[] = "inloop_smoothness_penalty"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_Error_computing_cost_function[] = "Error computing cost function.\n"; static const char __pyx_k_Cython_module_for_morphing_and[] = "\nCython module for morphing and cost functions 
implementations used in\nin the Variation Echo Tracking Algorithm\n"; static const char __pyx_k_The_number_of_sectors_in_x_axis[] = "The number of sectors in x axis (axis=0) don't divide the image size"; static const char __pyx_k_The_number_of_sectors_in_y_axis[] = "The number of sectors in y axis (axis=1) don't divide the image size"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_u_Error_computing_cost_function; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_kp_u_The_number_of_sectors_in_x_axis; static PyObject *__pyx_kp_u_The_number_of_sectors_in_y_axis; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_arange; static PyObject *__pyx_n_s_axis; static PyObject *__pyx_n_s_buffer; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_cost_function; static PyObject *__pyx_n_s_counts; static PyObject *__pyx_n_s_df_dx2; static PyObject *__pyx_n_s_df_dxdy; static PyObject *__pyx_n_s_df_dy2; static PyObject *__pyx_n_s_displacement; static PyObject *__pyx_n_s_dtype; static PyObject 
*__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_f00; static PyObject *__pyx_n_s_f01; static PyObject *__pyx_n_s_f10; static PyObject *__pyx_n_s_f11; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_u_float64; static PyObject *__pyx_n_s_full; static PyObject *__pyx_n_s_grad_residuals; static PyObject *__pyx_n_s_grad_smooth; static PyObject *__pyx_n_s_gradient; static PyObject *__pyx_n_s_gradient_data; static PyObject *__pyx_n_s_gradient_values; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_i_max; static PyObject *__pyx_n_s_i_min; static PyObject *__pyx_n_s_i_sec; static PyObject *__pyx_n_s_i_shift; static PyObject *__pyx_n_s_image; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_inloop_smoothness_penalty; static PyObject *__pyx_n_s_input_image; static PyObject *__pyx_n_s_int8; static PyObject *__pyx_n_s_interp_coef; static PyObject *__pyx_n_s_intp; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_j_max; static PyObject *__pyx_n_s_j_min; static PyObject *__pyx_n_s_j_sec; static PyObject *__pyx_n_s_j_shift; static PyObject *__pyx_n_s_l; static PyObject *__pyx_n_s_l0; static PyObject *__pyx_n_s_l1; static PyObject *__pyx_n_s_l_i; static PyObject *__pyx_n_s_ll; static PyObject *__pyx_n_s_m; static PyObject *__pyx_n_s_m0; static PyObject *__pyx_n_s_m1; static PyObject *__pyx_n_s_m_j; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mask; static PyObject *__pyx_n_s_mean; static PyObject *__pyx_n_s_mm; static PyObject *__pyx_n_s_morph_mask; static PyObject *__pyx_n_s_morphed_image; static PyObject *__pyx_n_s_morphed_mask; static PyObject *__pyx_n_s_name; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_new_image; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; 
static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_n_s_pysteps_motion__vet; static PyObject *__pyx_kp_s_pysteps_motion__vet_pyx; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reshape; static PyObject *__pyx_n_s_residuals; static PyObject *__pyx_n_s_return_counts; static PyObject *__pyx_n_s_return_index; static PyObject *__pyx_n_s_sector_area; static PyObject *__pyx_n_s_sector_displacement; static PyObject *__pyx_n_s_smooth_gain; static PyObject *__pyx_n_s_smoothness_penalty; static PyObject *__pyx_n_s_sum; static PyObject *__pyx_n_s_template_image; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_unique; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_warp; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_x_ceil; static PyObject *__pyx_n_s_x_float; static PyObject *__pyx_n_s_x_floor; static PyObject *__pyx_n_s_x_guess; static PyObject *__pyx_n_s_x_image_size; static PyObject *__pyx_n_s_x_max_float; static PyObject *__pyx_n_s_x_max_int; static PyObject *__pyx_n_s_x_sector_size; static PyObject *__pyx_n_s_x_sectors; static PyObject *__pyx_n_s_xy; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_y_ceil; static PyObject *__pyx_n_s_y_float; static PyObject *__pyx_n_s_y_floor; static PyObject *__pyx_n_s_y_guess; static PyObject *__pyx_n_s_y_image_size; static PyObject *__pyx_n_s_y_max_float; static PyObject *__pyx_n_s_y_max_int; static PyObject *__pyx_n_s_y_sector_size; static PyObject *__pyx_n_s_y_sectors; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_n_s_zip; static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient); /* proto */ static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject 
*__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_float_1_0; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_4; static PyObject *__pyx_tuple_; static PyObject *__pyx_slice__3; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; /* Late includes */ /* "pysteps/motion/_vet.pyx":21 * cimport numpy as np * * cdef inline float64 float_abs(float64 a) nogil: return a if a > 0. else -a # <<<<<<<<<<<<<< * """ Return the absolute value of a float """ * */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_a) { __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_1; if (((__pyx_v_a > 0.) 
!= 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = (-__pyx_v_a); } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":24 * """ Return the absolute value of a float """ * * cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b # <<<<<<<<<<<<<< * * cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_r; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1; if (((__pyx_v_a < __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":26 * cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b * * cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_r; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1; if (((__pyx_v_a > __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":29 * * @cython.cdivision(True) * cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 x1, * float64 x2, */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 
__pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2) { __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r; int __pyx_t_1; /* "pysteps/motion/_vet.pyx":39 * """ * * if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<< * return y1 * */ __pyx_t_1 = ((__pyx_f_7pysteps_6motion_4_vet_float_abs((__pyx_v_x1 - __pyx_v_x2)) < 1e-6) != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":40 * * if float_abs(x1 - x2) < 1e-6: * return y1 # <<<<<<<<<<<<<< * * return y1 + (x - x1) * (y2 - y1) / (x2 - x1) */ __pyx_r = __pyx_v_y1; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":39 * """ * * if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<< * return y1 * */ } /* "pysteps/motion/_vet.pyx":42 * return y1 * * return y1 + (x - x1) * (y2 - y1) / (x2 - x1) # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ __pyx_r = (__pyx_v_y1 + (((__pyx_v_x - __pyx_v_x1) * (__pyx_v_y2 - __pyx_v_y1)) / (__pyx_v_x2 - __pyx_v_x1))); goto __pyx_L0; /* "pysteps/motion/_vet.pyx":29 * * @cython.cdivision(True) * cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 x1, * float64 x2, */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "pysteps/motion/_vet.pyx":45 * * @cython.cdivision(True) * cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 y, * float64 x1, */ static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__bilinear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q11, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q12, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q21, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q22) { 
/* NOTE(review): generated locals and body of _bilinear_interpolation (its
   signature opens on the previous source line). */
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y1; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y2; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r;
/* Interpolate along x on both rows of the cell, then along y between the two
   intermediate results. */
 /* "pysteps/motion/_vet.pyx":59 * cdef float64 f_x_y1, f_x_y2 * * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) # <<<<<<<<<<<<<< * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) */ __pyx_v_f_x_y1 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q11, __pyx_v_q21); /* "pysteps/motion/_vet.pyx":60 * * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) # <<<<<<<<<<<<<< * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) * */ __pyx_v_f_x_y2 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q12, __pyx_v_q22); /* "pysteps/motion/_vet.pyx":61 * f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) * f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) * return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_r = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_y, __pyx_v_y1, __pyx_v_y2, __pyx_v_f_x_y1, __pyx_v_f_x_y2); goto __pyx_L0; /* "pysteps/motion/_vet.pyx":45 * * @cython.cdivision(True) * cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<< * float64 y, * float64 x1, */ /* function exit code */ __pyx_L0:; return __pyx_r; }
/* _warp: forward declaration of the Python-callable wrapper, followed by its
   runtime __doc__ string.  The char array below is a RUNTIME string literal
   that opens here and continues across the following source lines -- it must
   stay byte-identical, so no annotations are inserted past its opening
   quote. */
 /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* Python wrapper */ static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7pysteps_6motion_4_vet__warp[] = "\n    Morph image by applying a displacement field 
(Warping).\n \n The new image is created by selecting for each position the values of the\n input image at the positions given by the x and y displacements. \n The routine works in a backward sense. \n The displacement vectors have to refer to their destination.\n \n For more information in Morphing functions see Section 3 in \n `Beezley and Mandel (2008)`_.\n \n Beezley, J. D., & Mandel, J. (2008). \n Morphing ensemble Kalman filters. Tellus A, 60(1), 131-140.\n \n .. _`Beezley and Mandel (2008)`: http://dx.doi.org/10.1111/ j.1600-0870.2007.00275.x\n\n \n The displacement field in x and y directions and the image must have the\n same dimensions.\n \n The morphing is executed in parallel over x axis.\n \n The value of displaced pixels that fall outside the limits takes the \n value of the nearest edge. Those pixels are indicated by values greater\n than 1 in the output mask.\n \n Parameters\n ----------\n \n image : ndarray (ndim = 2)\n Image to morph\n \n displacement : ndarray (ndim = 3)\n Displacement field to be applied (Warping). \n \n The dimensions are:\n displacement [ x (0) or y (1) , \n i index of pixel, j index of pixel ]\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n\n Returns\n -------\n \n image : ndarray (float64 ,ndim = 2)\n Morphed image.\n \n mask : ndarray (int8 ,ndim = 2)\n Invalid values mask. 
Points outside the boundaries are masked.\n        Values greater than 1, indicate masked values.\n\n    gradient_values : ndarray (float64 ,ndim = 3), optional\n        If gradient keyword is True, the gradient of the function is also\n        returned.\n    ";
/* NOTE(review): the string literal ending above is the runtime __doc__ of
   _warp (opened on an earlier source line); it is reproduced byte-for-byte.
   Next: the PyMethodDef entry registering _warp as a METH_VARARGS|
   METH_KEYWORDS callable bound to the wrapper below. */
 static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_1_warp = {"_warp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_1_warp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet__warp};
/* Python wrapper for _warp: unpacks up to 4 arguments (image, mask,
   displacement required; gradient optional) from the positional tuple and/or
   keyword dict, mixing the two via the fallthrough switch below, then
   type-checks the three array arguments and dispatches to the generated
   implementation __pyx_pf_7pysteps_6motion_4_vet__warp. */
 static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_image = 0; PyArrayObject *__pyx_v_mask = 0; PyArrayObject *__pyx_v_displacement = 0; int __pyx_v_gradient; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_warp (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_image,&__pyx_n_s_mask,&__pyx_n_s_displacement,&__pyx_n_s_gradient,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_image)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 1); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_displacement)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 2); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_warp") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_image = ((PyArrayObject *)values[0]); __pyx_v_mask = ((PyArrayObject *)values[1]); __pyx_v_displacement = ((PyArrayObject *)values[2]); if (values[3]) { __pyx_v_gradient = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) } else {
/* gradient keyword omitted: apply the .pyx default `gradient=False`. */
 /* "pysteps/motion/_vet.pyx":70 * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, * bint gradient=False): # <<<<<<<<<<<<<< * """ * Morph image by applying a displacement field (Warping). */ __pyx_v_gradient = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_image), __pyx_ptype_5numpy_ndarray, 1, "image", 0))) __PYX_ERR(0, 67, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 68, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_displacement), __pyx_ptype_5numpy_ndarray, 1, "displacement", 0))) __PYX_ERR(0, 69, __pyx_L1_error) __pyx_r = __pyx_pf_7pysteps_6motion_4_vet__warp(__pyx_self, __pyx_v_image, __pyx_v_mask, __pyx_v_displacement, __pyx_v_gradient); /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): start of the generated implementation
   __pyx_pf_7pysteps_6motion_4_vet__warp (the .pyx `_warp` body); its
   definition continues beyond this chunk and is left untouched. */
 static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_nx; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_ny; PyArrayObject *__pyx_v_new_image = 0; PyArrayObject *__pyx_v_morphed_mask = 0; PyArrayObject *__pyx_v_gradient_values = 0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_max_int; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_max_int; __pyx_t_7pysteps_6motion_4_vet_float64 
__pyx_v_x_max_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_max_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_float; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dx; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dy; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_floor; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_ceil; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_floor; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_ceil; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f00; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f10; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f01; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f11; __Pyx_LocalBuf_ND __pyx_pybuffernd_displacement; __Pyx_Buffer __pyx_pybuffer_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_gradient_values; __Pyx_Buffer __pyx_pybuffer_gradient_values; __Pyx_LocalBuf_ND __pyx_pybuffernd_image; __Pyx_Buffer __pyx_pybuffer_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_mask; __Pyx_Buffer __pyx_pybuffer_morphed_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_new_image; __Pyx_Buffer __pyx_pybuffer_new_image; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_9; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_10; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_11; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_12; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_13; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; int __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t 
__pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; Py_ssize_t __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; Py_ssize_t __pyx_t_54; Py_ssize_t __pyx_t_55; Py_ssize_t __pyx_t_56; Py_ssize_t __pyx_t_57; Py_ssize_t __pyx_t_58; Py_ssize_t __pyx_t_59; Py_ssize_t __pyx_t_60; Py_ssize_t __pyx_t_61; Py_ssize_t __pyx_t_62; Py_ssize_t __pyx_t_63; Py_ssize_t __pyx_t_64; Py_ssize_t __pyx_t_65; Py_ssize_t __pyx_t_66; Py_ssize_t __pyx_t_67; Py_ssize_t __pyx_t_68; Py_ssize_t __pyx_t_69; Py_ssize_t __pyx_t_70; Py_ssize_t __pyx_t_71; Py_ssize_t __pyx_t_72; Py_ssize_t __pyx_t_73; Py_ssize_t __pyx_t_74; Py_ssize_t __pyx_t_75; __Pyx_RefNannySetupContext("_warp", 0); __pyx_pybuffer_new_image.pybuffer.buf = NULL; __pyx_pybuffer_new_image.refcount = 0; __pyx_pybuffernd_new_image.data = NULL; __pyx_pybuffernd_new_image.rcbuffer = &__pyx_pybuffer_new_image; __pyx_pybuffer_morphed_mask.pybuffer.buf = NULL; __pyx_pybuffer_morphed_mask.refcount = 0; __pyx_pybuffernd_morphed_mask.data = NULL; __pyx_pybuffernd_morphed_mask.rcbuffer = &__pyx_pybuffer_morphed_mask; __pyx_pybuffer_gradient_values.pybuffer.buf = NULL; __pyx_pybuffer_gradient_values.refcount = 0; __pyx_pybuffernd_gradient_values.data = NULL; __pyx_pybuffernd_gradient_values.rcbuffer = &__pyx_pybuffer_gradient_values; __pyx_pybuffer_image.pybuffer.buf = NULL; __pyx_pybuffer_image.refcount = 0; __pyx_pybuffernd_image.data = NULL; 
__pyx_pybuffernd_image.rcbuffer = &__pyx_pybuffer_image; __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; __pyx_pybuffer_displacement.pybuffer.buf = NULL; __pyx_pybuffer_displacement.refcount = 0; __pyx_pybuffernd_displacement.data = NULL; __pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_image.diminfo[0].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_image.diminfo[0].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_image.diminfo[1].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_image.diminfo[1].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_displacement, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2]; /* "pysteps/motion/_vet.pyx":130 * """ * * cdef intp nx = <intp> image.shape[0] # <<<<<<<<<<<<<< * cdef intp ny = <intp> image.shape[1] * */ __pyx_v_nx = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[0])); /* "pysteps/motion/_vet.pyx":131 * * cdef intp nx = <intp> image.shape[0] * cdef intp ny = <intp> image.shape[1] # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 2] new_image = ( */ __pyx_v_ny = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[1])); /* "pysteps/motion/_vet.pyx":134 * * cdef np.ndarray[float64, ndim = 2] new_image = ( * np.zeros([nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef np.ndarray[int8, ndim = 2] morphed_mask = ( */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = 
__Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 134, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_new_image = ((PyArrayObject *)Py_None); 
__Pyx_INCREF(Py_None); __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 133, __pyx_L1_error) } else {__pyx_pybuffernd_new_image.diminfo[0].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_new_image.diminfo[0].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_new_image.diminfo[1].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_new_image.diminfo[1].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[1]; } } __pyx_t_6 = 0; __pyx_v_new_image = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":137 * * cdef np.ndarray[int8, ndim = 2] morphed_mask = ( * np.zeros([nx, ny], dtype=np.int8)) # <<<<<<<<<<<<<< * * morphed_mask[mask > 0] = 1.0 */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 137, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_morphed_mask = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 136, __pyx_L1_error) } else {__pyx_pybuffernd_morphed_mask.diminfo[0].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_mask.diminfo[0].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_mask.diminfo[1].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_mask.diminfo[1].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[1]; } } __pyx_t_7 = 0; __pyx_v_morphed_mask = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":139 * np.zeros([nx, ny], 
dtype=np.int8)) * * morphed_mask[mask > 0] = 1.0 # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 3] gradient_values = ( */ __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morphed_mask), __pyx_t_1, __pyx_float_1_0) < 0)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":142 * * cdef np.ndarray[float64, ndim = 3] gradient_values = ( * np.zeros([2, nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef intp x, y */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, 
__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 142, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { __pyx_v_gradient_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 141, __pyx_L1_error) } else {__pyx_pybuffernd_gradient_values.diminfo[0].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_gradient_values.diminfo[0].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_gradient_values.diminfo[1].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_gradient_values.diminfo[1].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_gradient_values.diminfo[2].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_gradient_values.diminfo[2].shape = 
__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[2]; } } __pyx_t_8 = 0; __pyx_v_gradient_values = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":146 * cdef intp x, y * * cdef intp x_max_int = nx - 1 # <<<<<<<<<<<<<< * cdef intp y_max_int = ny - 1 * */ __pyx_v_x_max_int = (__pyx_v_nx - 1); /* "pysteps/motion/_vet.pyx":147 * * cdef intp x_max_int = nx - 1 * cdef intp y_max_int = ny - 1 # <<<<<<<<<<<<<< * * cdef float64 x_max_float = <float64> x_max_int */ __pyx_v_y_max_int = (__pyx_v_ny - 1); /* "pysteps/motion/_vet.pyx":149 * cdef intp y_max_int = ny - 1 * * cdef float64 x_max_float = <float64> x_max_int # <<<<<<<<<<<<<< * cdef float64 y_max_float = <float64> y_max_int * */ __pyx_v_x_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_max_int); /* "pysteps/motion/_vet.pyx":150 * * cdef float64 x_max_float = <float64> x_max_int * cdef float64 y_max_float = <float64> y_max_int # <<<<<<<<<<<<<< * * cdef float64 x_float, y_float, dx, dy */ __pyx_v_y_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_max_int); /* "pysteps/motion/_vet.pyx":161 * cdef float64 f00, f10, f01, f11 * * for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * for y in range(ny): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_9 = __pyx_v_nx; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_11 = (__pyx_t_9 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_11 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, 
__pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_f00) lastprivate(__pyx_v_f01) lastprivate(__pyx_v_f10) lastprivate(__pyx_v_f11) firstprivate(__pyx_v_x) lastprivate(__pyx_v_x) lastprivate(__pyx_v_x_ceil) lastprivate(__pyx_v_x_float) lastprivate(__pyx_v_x_floor) lastprivate(__pyx_v_y) lastprivate(__pyx_v_y_ceil) lastprivate(__pyx_v_y_float) lastprivate(__pyx_v_y_floor) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){ { __pyx_v_x = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_10); /* Initialize private variables to invalid values */ __pyx_v_dx = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f00 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f01 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f10 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_f11 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_x_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_x_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_y_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); 
__pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); /* "pysteps/motion/_vet.pyx":163 * for x in prange(nx, schedule='dynamic', nogil=True): * * for y in range(ny): # <<<<<<<<<<<<<< * * x_float = (<float64> x) - displacement[0, x, y] */ __pyx_t_12 = __pyx_v_ny; __pyx_t_13 = __pyx_t_12; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_y = __pyx_t_14; /* "pysteps/motion/_vet.pyx":165 * for y in range(ny): * * x_float = (<float64> x) - displacement[0, x, y] # <<<<<<<<<<<<<< * y_float = (<float64> y) - displacement[1, x, y] * */ __pyx_t_15 = 0; __pyx_t_16 = __pyx_v_x; __pyx_t_17 = __pyx_v_y; __pyx_v_x_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_17, __pyx_pybuffernd_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":166 * * x_float = (<float64> x) - displacement[0, x, y] * y_float = (<float64> y) - displacement[1, x, y] # <<<<<<<<<<<<<< * * if x_float < 0: */ __pyx_t_18 = 1; __pyx_t_19 = __pyx_v_x; __pyx_t_20 = __pyx_v_y; __pyx_v_y_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_20, __pyx_pybuffernd_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":168 * y_float = (<float64> y) - displacement[1, x, y] * * if x_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = 0 */ __pyx_t_21 = ((__pyx_v_x_float < 0.0) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":169 * * if x_float < 0: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * x_float = 0 * x_floor = 0 */ __pyx_t_22 = 
__pyx_v_x; __pyx_t_23 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":170 * if x_float < 0: * morphed_mask[x, y] = 1 * x_float = 0 # <<<<<<<<<<<<<< * x_floor = 0 * x_ceil = 0 */ __pyx_v_x_float = 0.0; /* "pysteps/motion/_vet.pyx":171 * morphed_mask[x, y] = 1 * x_float = 0 * x_floor = 0 # <<<<<<<<<<<<<< * x_ceil = 0 * */ __pyx_v_x_floor = 0; /* "pysteps/motion/_vet.pyx":172 * x_float = 0 * x_floor = 0 * x_ceil = 0 # <<<<<<<<<<<<<< * * elif x_float > x_max_float: */ __pyx_v_x_ceil = 0; /* "pysteps/motion/_vet.pyx":168 * y_float = (<float64> y) - displacement[1, x, y] * * if x_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = 0 */ goto __pyx_L12; } /* "pysteps/motion/_vet.pyx":174 * x_ceil = 0 * * elif x_float > x_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = x_max_float */ __pyx_t_21 = ((__pyx_v_x_float > __pyx_v_x_max_float) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":175 * * elif x_float > x_max_float: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * x_float = x_max_float * x_floor = x_max_int */ __pyx_t_24 = __pyx_v_x; __pyx_t_25 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":176 * elif x_float > x_max_float: * morphed_mask[x, y] = 1 * x_float = x_max_float # <<<<<<<<<<<<<< * x_floor = x_max_int * x_ceil = x_max_int */ __pyx_v_x_float = __pyx_v_x_max_float; /* "pysteps/motion/_vet.pyx":177 * morphed_mask[x, y] = 1 * x_float = x_max_float * x_floor = x_max_int # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ __pyx_v_x_floor = __pyx_v_x_max_int; /* 
"pysteps/motion/_vet.pyx":178 * x_float = x_max_float * x_floor = x_max_int * x_ceil = x_max_int # <<<<<<<<<<<<<< * * else: */ __pyx_v_x_ceil = __pyx_v_x_max_int; /* "pysteps/motion/_vet.pyx":174 * x_ceil = 0 * * elif x_float > x_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * x_float = x_max_float */ goto __pyx_L12; } /* "pysteps/motion/_vet.pyx":181 * * else: * x_floor = <intp> floor(x_float) # <<<<<<<<<<<<<< * x_ceil = x_floor + 1 * if x_ceil > x_max_int: */ /*else*/ { __pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_x_float)); /* "pysteps/motion/_vet.pyx":182 * else: * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 # <<<<<<<<<<<<<< * if x_ceil > x_max_int: * x_ceil = x_max_int */ __pyx_v_x_ceil = (__pyx_v_x_floor + 1); /* "pysteps/motion/_vet.pyx":183 * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 * if x_ceil > x_max_int: # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ __pyx_t_21 = ((__pyx_v_x_ceil > __pyx_v_x_max_int) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":184 * x_ceil = x_floor + 1 * if x_ceil > x_max_int: * x_ceil = x_max_int # <<<<<<<<<<<<<< * * if y_float < 0: */ __pyx_v_x_ceil = __pyx_v_x_max_int; /* "pysteps/motion/_vet.pyx":183 * x_floor = <intp> floor(x_float) * x_ceil = x_floor + 1 * if x_ceil > x_max_int: # <<<<<<<<<<<<<< * x_ceil = x_max_int * */ } } __pyx_L12:; /* "pysteps/motion/_vet.pyx":186 * x_ceil = x_max_int * * if y_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = 0 */ __pyx_t_21 = ((__pyx_v_y_float < 0.0) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":187 * * if y_float < 0: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * y_float = 0 * y_floor = 0 */ __pyx_t_26 = __pyx_v_x; __pyx_t_27 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* 
"pysteps/motion/_vet.pyx":188 * if y_float < 0: * morphed_mask[x, y] = 1 * y_float = 0 # <<<<<<<<<<<<<< * y_floor = 0 * y_ceil = 0 */ __pyx_v_y_float = 0.0; /* "pysteps/motion/_vet.pyx":189 * morphed_mask[x, y] = 1 * y_float = 0 * y_floor = 0 # <<<<<<<<<<<<<< * y_ceil = 0 * elif y_float > y_max_float: */ __pyx_v_y_floor = 0; /* "pysteps/motion/_vet.pyx":190 * y_float = 0 * y_floor = 0 * y_ceil = 0 # <<<<<<<<<<<<<< * elif y_float > y_max_float: * morphed_mask[x, y] = 1 */ __pyx_v_y_ceil = 0; /* "pysteps/motion/_vet.pyx":186 * x_ceil = x_max_int * * if y_float < 0: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = 0 */ goto __pyx_L14; } /* "pysteps/motion/_vet.pyx":191 * y_floor = 0 * y_ceil = 0 * elif y_float > y_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = y_max_float */ __pyx_t_21 = ((__pyx_v_y_float > __pyx_v_y_max_float) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":192 * y_ceil = 0 * elif y_float > y_max_float: * morphed_mask[x, y] = 1 # <<<<<<<<<<<<<< * y_float = y_max_float * y_floor = y_max_int */ __pyx_t_28 = __pyx_v_x; __pyx_t_29 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_29, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1; /* "pysteps/motion/_vet.pyx":193 * elif y_float > y_max_float: * morphed_mask[x, y] = 1 * y_float = y_max_float # <<<<<<<<<<<<<< * y_floor = y_max_int * y_ceil = y_max_int */ __pyx_v_y_float = __pyx_v_y_max_float; /* "pysteps/motion/_vet.pyx":194 * morphed_mask[x, y] = 1 * y_float = y_max_float * y_floor = y_max_int # <<<<<<<<<<<<<< * y_ceil = y_max_int * else: */ __pyx_v_y_floor = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":195 * y_float = y_max_float * y_floor = y_max_int * y_ceil = y_max_int # <<<<<<<<<<<<<< * else: * y_floor = <intp> floor(y_float) */ __pyx_v_y_ceil = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":191 * y_floor = 0 * 
y_ceil = 0 * elif y_float > y_max_float: # <<<<<<<<<<<<<< * morphed_mask[x, y] = 1 * y_float = y_max_float */ goto __pyx_L14; } /* "pysteps/motion/_vet.pyx":197 * y_ceil = y_max_int * else: * y_floor = <intp> floor(y_float) # <<<<<<<<<<<<<< * y_ceil = y_floor + 1 * if y_ceil > y_max_int: */ /*else*/ { __pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_y_float)); /* "pysteps/motion/_vet.pyx":198 * else: * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 # <<<<<<<<<<<<<< * if y_ceil > y_max_int: * y_ceil = y_max_int */ __pyx_v_y_ceil = (__pyx_v_y_floor + 1); /* "pysteps/motion/_vet.pyx":199 * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 * if y_ceil > y_max_int: # <<<<<<<<<<<<<< * y_ceil = y_max_int * */ __pyx_t_21 = ((__pyx_v_y_ceil > __pyx_v_y_max_int) != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":200 * y_ceil = y_floor + 1 * if y_ceil > y_max_int: * y_ceil = y_max_int # <<<<<<<<<<<<<< * * dx = x_float - <float64> x_floor */ __pyx_v_y_ceil = __pyx_v_y_max_int; /* "pysteps/motion/_vet.pyx":199 * y_floor = <intp> floor(y_float) * y_ceil = y_floor + 1 * if y_ceil > y_max_int: # <<<<<<<<<<<<<< * y_ceil = y_max_int * */ } } __pyx_L14:; /* "pysteps/motion/_vet.pyx":202 * y_ceil = y_max_int * * dx = x_float - <float64> x_floor # <<<<<<<<<<<<<< * dy = y_float - <float64> y_floor * */ __pyx_v_dx = (__pyx_v_x_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_floor)); /* "pysteps/motion/_vet.pyx":203 * * dx = x_float - <float64> x_floor * dy = y_float - <float64> y_floor # <<<<<<<<<<<<<< * * # This assumes that the spacing between grid points=1. 
*/ __pyx_v_dy = (__pyx_v_y_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_floor)); /* "pysteps/motion/_vet.pyx":208 * * # Bilinear interpolation coeficients * f00 = image[x_floor, y_floor] # <<<<<<<<<<<<<< * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] */ __pyx_t_30 = __pyx_v_x_floor; __pyx_t_31 = __pyx_v_y_floor; __pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_image.diminfo[1].strides)); /* "pysteps/motion/_vet.pyx":209 * # Bilinear interpolation coeficients * f00 = image[x_floor, y_floor] * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] # <<<<<<<<<<<<<< * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] */ __pyx_t_32 = __pyx_v_x_ceil; __pyx_t_33 = __pyx_v_y_floor; __pyx_t_34 = __pyx_v_x_floor; __pyx_t_35 = __pyx_v_y_floor; __pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_33, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":210 * f00 = image[x_floor, y_floor] * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] # <<<<<<<<<<<<<< * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) */ __pyx_t_36 = __pyx_v_x_floor; __pyx_t_37 = __pyx_v_y_ceil; __pyx_t_38 = __pyx_v_x_floor; __pyx_t_39 = __pyx_v_y_floor; __pyx_v_f01 = 
((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_39, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":211 * f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] # <<<<<<<<<<<<<< * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) * */ __pyx_t_40 = __pyx_v_x_floor; __pyx_t_41 = __pyx_v_y_floor; __pyx_t_42 = __pyx_v_x_ceil; __pyx_t_43 = __pyx_v_y_floor; /* "pysteps/motion/_vet.pyx":212 * f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] * f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] * - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) # <<<<<<<<<<<<<< * * # Bilinear interpolation */ __pyx_t_44 = __pyx_v_x_floor; __pyx_t_45 = __pyx_v_y_ceil; __pyx_t_46 = __pyx_v_x_ceil; __pyx_t_47 = __pyx_v_y_ceil; __pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_41, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_43, __pyx_pybuffernd_image.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_45, __pyx_pybuffernd_image.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_image.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":215 * * # Bilinear interpolation * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 # <<<<<<<<<<<<<< * * if gradient: */ __pyx_t_48 = __pyx_v_x; __pyx_t_49 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_new_image.diminfo[0].strides, __pyx_t_49, __pyx_pybuffernd_new_image.diminfo[1].strides) = (((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":217 * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 * * if gradient: # <<<<<<<<<<<<<< * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 */ __pyx_t_21 = (__pyx_v_gradient != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":218 * * if gradient: * gradient_values[0, x, y] = f10 + dy * f11 # <<<<<<<<<<<<<< * gradient_values[1, x, y] = f01 + dx * f11 * */ __pyx_t_50 = 0; __pyx_t_51 = __pyx_v_x; __pyx_t_52 = __pyx_v_y; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_51, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_52, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f10 + (__pyx_v_dy * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":219 * if gradient: * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 # <<<<<<<<<<<<<< * * f00 = mask[x_floor, y_floor] */ __pyx_t_53 = 1; __pyx_t_54 = __pyx_v_x; __pyx_t_55 = __pyx_v_y; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_53, 
__pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_54, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_55, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f01 + (__pyx_v_dx * __pyx_v_f11)); /* "pysteps/motion/_vet.pyx":217 * new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 * * if gradient: # <<<<<<<<<<<<<< * gradient_values[0, x, y] = f10 + dy * f11 * gradient_values[1, x, y] = f01 + dx * f11 */ } /* "pysteps/motion/_vet.pyx":221 * gradient_values[1, x, y] = f01 + dx * f11 * * f00 = mask[x_floor, y_floor] # <<<<<<<<<<<<<< * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] */ __pyx_t_56 = __pyx_v_x_floor; __pyx_t_57 = __pyx_v_y_floor; __pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_56, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_57, __pyx_pybuffernd_mask.diminfo[1].strides)); /* "pysteps/motion/_vet.pyx":222 * * f00 = mask[x_floor, y_floor] * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] # <<<<<<<<<<<<<< * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] */ __pyx_t_58 = __pyx_v_x_ceil; __pyx_t_59 = __pyx_v_y_floor; __pyx_t_60 = __pyx_v_x_floor; __pyx_t_61 = __pyx_v_y_floor; __pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_59, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_61, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":223 * f00 = mask[x_floor, y_floor] * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] # 
<<<<<<<<<<<<<< * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) */ __pyx_t_62 = __pyx_v_x_floor; __pyx_t_63 = __pyx_v_y_ceil; __pyx_t_64 = __pyx_v_x_floor; __pyx_t_65 = __pyx_v_y_floor; __pyx_v_f01 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_63, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_65, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":224 * f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] # <<<<<<<<<<<<<< * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) * */ __pyx_t_66 = __pyx_v_x_floor; __pyx_t_67 = __pyx_v_y_floor; __pyx_t_68 = __pyx_v_x_ceil; __pyx_t_69 = __pyx_v_y_floor; /* "pysteps/motion/_vet.pyx":225 * f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] * f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) # <<<<<<<<<<<<<< * * morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01 */ __pyx_t_70 = __pyx_v_x_floor; __pyx_t_71 = __pyx_v_y_ceil; __pyx_t_72 = __pyx_v_x_ceil; __pyx_t_73 = __pyx_v_y_ceil; __pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_69, __pyx_pybuffernd_mask.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, 
__pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_71, __pyx_pybuffernd_mask.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_73, __pyx_pybuffernd_mask.diminfo[1].strides))); /* "pysteps/motion/_vet.pyx":227 * - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) * * morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01 # <<<<<<<<<<<<<< * + dx * dy * f11) * */ __pyx_t_74 = __pyx_v_x; __pyx_t_75 = __pyx_v_y; *__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_75, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = ((__pyx_t_7pysteps_6motion_4_vet_int8)(((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11))); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "pysteps/motion/_vet.pyx":161 * cdef float64 f00, f10, f01, f11 * * for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * for y in range(ny): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "pysteps/motion/_vet.pyx":230 * + dx * dy * f11) * * morphed_mask[morphed_mask != 0] = 1 # <<<<<<<<<<<<<< * if gradient: * return new_image, morphed_mask, gradient_values */ __pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_morphed_mask), __pyx_int_0, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 230, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject 
*)__pyx_v_morphed_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":231 * * morphed_mask[morphed_mask != 0] = 1 * if gradient: # <<<<<<<<<<<<<< * return new_image, morphed_mask, gradient_values * else: */ __pyx_t_21 = (__pyx_v_gradient != 0); if (__pyx_t_21) { /* "pysteps/motion/_vet.pyx":232 * morphed_mask[morphed_mask != 0] = 1 * if gradient: * return new_image, morphed_mask, gradient_values # <<<<<<<<<<<<<< * else: * return new_image, morphed_mask */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_new_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_new_image)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image)); __Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_gradient_values)); __Pyx_GIVEREF(((PyObject *)__pyx_v_gradient_values)); PyTuple_SET_ITEM(__pyx_t_5, 2, ((PyObject *)__pyx_v_gradient_values)); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":231 * * morphed_mask[morphed_mask != 0] = 1 * if gradient: # <<<<<<<<<<<<<< * return new_image, morphed_mask, gradient_values * else: */ } /* "pysteps/motion/_vet.pyx":234 * return new_image, morphed_mask, gradient_values * else: * return new_image, morphed_mask # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_new_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_new_image)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image)); __Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask)); 
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask)); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_new_image); __Pyx_XDECREF((PyObject *)__pyx_v_morphed_mask); __Pyx_XDECREF((PyObject *)__pyx_v_gradient_values); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* Python wrapper */ static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_7pysteps_6motion_4_vet_2_cost_function[] = "\n Variational Echo Tracking Cost function.\n \n This function computes the Variational Echo Tracking (VET) \n Cost function presented by `Laroche and Zawazdki (1995)`_ and used in the \n McGill Algorithm for Prediction by Lagrangian Extrapolation (MAPLE) \n described in\n `Germann and Zawadzki (2002)`_.\n \n \n .. _`Laroche and Zawazdki (1995)`: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n .. _`Germann and Zawadzki (2002)`: http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2\n \n \n The cost function is a the sum of the residuals of the squared image \n differences along with a smoothness constrain. 
\n \n This cost function implementation, supports displacement vector \n sectorization.\n The displacement vector represent the displacement applied to the pixels in\n each individual sector.\n \n This help to reduce the number of degrees of freedom of the cost function \n when hierarchical approaches are used to obtain the minima of \n the cost function (from low resolution to full image resolution).\n For example, in the MAPLE algorithm an Scaling Guess procedure is used to \n find the displacement vectors.\n The echo motion field is retrieved in three runs with increasing resolution.\n The retrieval starts with (left) a uniform field, which is used as a first \n guess to retrieve (middle) the field on a 5 \303\227 5 grid, which in turn is the \n first guess of (right) the final minimization with a 25 \303\227 25 grid\n \n The shape of the sector is deduced from the image shape and the displacement\n vector shape. \n \n IMPORTANT: The number of sectors in each dimension (x and y) must be a \n factor full image size.\n \n The value of displaced pixels that fall outside the limits takes the \n value of the nearest edge.\n \n The cost function is computed in parallel over the x axis.""\n \n .. _ndarray: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html\n \n Parameters\n ----------\n \n sector_displacement : ndarray_ (ndim=3) \n Array of displacements to apply to each sector. The dimensions are:\n sector_displacement [ x (0) or y (1) displacement, \n i index of sector, j index of sector ] \n \n \n template_image : ndarray_ (ndim=2)\n Input image array where the sector displacement is applied.\n \n input_image : ndarray_\n Image array to be used as reference \n \n smooth_gain : float\n Smoothness constrain gain\n\n mask : ndarray_ (ndim=2)\n Data mask. 
If is True, the data is marked as not valid and is not\n used in the computations.\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n Returns\n -------\n \n penalty or gradient values.\n\n penalty : float\n Value of the cost function\n\n gradient_values : ndarray (float64 ,ndim = 3), optional\n If gradient keyword is True, the gradient of the function is also\n returned.\n \n \n References\n ----------\n \n Laroche, S., and I. Zawadzki, 1995: \n Retrievals of horizontal winds from single-Doppler clear-air data by methods\n of cross-correlation and variational analysis. \n J. Atmos. Oceanic Technol., 12, 721\342\200\223738.\n doi: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n Germann, U. and I. Zawadzki, 2002: \n Scale-Dependence of the Predictability of Precipitation from Continental \n Radar Images.\n Part I: Description of the Methodology. Mon. Wea. Rev., 130, 2859\342\200\2232873,\n doi: 10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2. 
\n \n "; static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_3_cost_function = {"_cost_function", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_3_cost_function, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet_2_cost_function}; static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_sector_displacement = 0; PyArrayObject *__pyx_v_template_image = 0; PyArrayObject *__pyx_v_input_image = 0; PyArrayObject *__pyx_v_mask = 0; float __pyx_v_smooth_gain; int __pyx_v_gradient; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cost_function (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sector_displacement,&__pyx_n_s_template_image,&__pyx_n_s_input_image,&__pyx_n_s_mask,&__pyx_n_s_smooth_gain,&__pyx_n_s_gradient,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sector_displacement)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_template_image)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 1); __PYX_ERR(0, 240, 
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_input_image)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 2); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 3); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_smooth_gain)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 4); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cost_function") < 0)) __PYX_ERR(0, 240, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_sector_displacement = ((PyArrayObject *)values[0]); __pyx_v_template_image = ((PyArrayObject *)values[1]); __pyx_v_input_image = ((PyArrayObject *)values[2]); __pyx_v_mask = ((PyArrayObject *)values[3]); __pyx_v_smooth_gain = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_smooth_gain == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 244, __pyx_L3_error) if (values[5]) { __pyx_v_gradient = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 245, 
__pyx_L3_error) } else { /* "pysteps/motion/_vet.pyx":245 * np.ndarray[int8, ndim=2] mask, * float smooth_gain, * bint gradient = False): # <<<<<<<<<<<<<< * """ * Variational Echo Tracking Cost function. */ __pyx_v_gradient = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 240, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sector_displacement), __pyx_ptype_5numpy_ndarray, 1, "sector_displacement", 0))) __PYX_ERR(0, 240, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_template_image), __pyx_ptype_5numpy_ndarray, 1, "template_image", 0))) __PYX_ERR(0, 241, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_input_image), __pyx_ptype_5numpy_ndarray, 1, "input_image", 0))) __PYX_ERR(0, 242, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 243, __pyx_L1_error) __pyx_r = __pyx_pf_7pysteps_6motion_4_vet_2_cost_function(__pyx_self, __pyx_v_sector_displacement, __pyx_v_template_image, __pyx_v_input_image, __pyx_v_mask, __pyx_v_smooth_gain, __pyx_v_gradient); /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject 
*__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient) { __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sectors; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sectors; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_image_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_image_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sector_size; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sector_size; PyArrayObject *__pyx_v_displacement = 0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_xy; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m0; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l1; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m1; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i_shift; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j_shift; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_axis; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_x_guess = 0; PyArrayObject *__pyx_v_y_guess = 0; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_sector_area; PyArrayObject *__pyx_v_interp_coef = 0; PyArrayObject *__pyx_v_l_i = 0; PyArrayObject *__pyx_v_m_j = 0; PyArrayObject *__pyx_v_i_min = 0; PyArrayObject *__pyx_v_i_max = 0; PyArrayObject *__pyx_v_j_min = 0; PyArrayObject *__pyx_v_j_max = 0; PyObject *__pyx_v_counts = NULL; PyArrayObject *__pyx_v_morphed_image = 0; PyArrayObject *__pyx_v_morph_mask = 0; PyArrayObject *__pyx_v__gradient_data = 0; PyArrayObject *__pyx_v_grad_residuals = 0; PyArrayObject *__pyx_v_grad_smooth = 0; PyArrayObject *__pyx_v_buffer = 0; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_residuals; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_smoothness_penalty; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dx2; 
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dxdy; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dy2; __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_inloop_smoothness_penalty; __Pyx_LocalBuf_ND __pyx_pybuffernd__gradient_data; __Pyx_Buffer __pyx_pybuffer__gradient_data; __Pyx_LocalBuf_ND __pyx_pybuffernd_buffer; __Pyx_Buffer __pyx_pybuffer_buffer; __Pyx_LocalBuf_ND __pyx_pybuffernd_displacement; __Pyx_Buffer __pyx_pybuffer_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_grad_residuals; __Pyx_Buffer __pyx_pybuffer_grad_residuals; __Pyx_LocalBuf_ND __pyx_pybuffernd_grad_smooth; __Pyx_Buffer __pyx_pybuffer_grad_smooth; __Pyx_LocalBuf_ND __pyx_pybuffernd_i_max; __Pyx_Buffer __pyx_pybuffer_i_max; __Pyx_LocalBuf_ND __pyx_pybuffernd_i_min; __Pyx_Buffer __pyx_pybuffer_i_min; __Pyx_LocalBuf_ND __pyx_pybuffernd_input_image; __Pyx_Buffer __pyx_pybuffer_input_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_interp_coef; __Pyx_Buffer __pyx_pybuffer_interp_coef; __Pyx_LocalBuf_ND __pyx_pybuffernd_j_max; __Pyx_Buffer __pyx_pybuffer_j_max; __Pyx_LocalBuf_ND __pyx_pybuffernd_j_min; __Pyx_Buffer __pyx_pybuffer_j_min; __Pyx_LocalBuf_ND __pyx_pybuffernd_l_i; __Pyx_Buffer __pyx_pybuffer_l_i; __Pyx_LocalBuf_ND __pyx_pybuffernd_m_j; __Pyx_Buffer __pyx_pybuffer_m_j; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morph_mask; __Pyx_Buffer __pyx_pybuffer_morph_mask; __Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_image; __Pyx_Buffer __pyx_pybuffer_morphed_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_sector_displacement; __Pyx_Buffer __pyx_pybuffer_sector_displacement; __Pyx_LocalBuf_ND __pyx_pybuffernd_template_image; __Pyx_Buffer __pyx_pybuffer_template_image; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_x_guess; __Pyx_Buffer __pyx_pybuffer_x_guess; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_y_guess; __Pyx_Buffer 
__pyx_pybuffer_y_guess; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyArrayObject *__pyx_t_13 = NULL; PyArrayObject *__pyx_t_14 = NULL; PyArrayObject *__pyx_t_15 = NULL; PyArrayObject *__pyx_t_16 = NULL; PyArrayObject *__pyx_t_17 = NULL; PyArrayObject *__pyx_t_18 = NULL; PyArrayObject *__pyx_t_19 = NULL; PyArrayObject *__pyx_t_20 = NULL; PyArrayObject *__pyx_t_21 = NULL; PyArrayObject *__pyx_t_22 = NULL; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_23; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_24; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_25; Py_ssize_t __pyx_t_26; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_27; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_28; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; Py_ssize_t __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; Py_ssize_t __pyx_t_54; Py_ssize_t __pyx_t_55; Py_ssize_t __pyx_t_56; Py_ssize_t __pyx_t_57; Py_ssize_t __pyx_t_58; Py_ssize_t __pyx_t_59; Py_ssize_t __pyx_t_60; Py_ssize_t __pyx_t_61; Py_ssize_t __pyx_t_62; Py_ssize_t __pyx_t_63; Py_ssize_t __pyx_t_64; Py_ssize_t __pyx_t_65; Py_ssize_t __pyx_t_66; Py_ssize_t __pyx_t_67; Py_ssize_t __pyx_t_68; Py_ssize_t __pyx_t_69; Py_ssize_t __pyx_t_70; 
Py_ssize_t __pyx_t_71; Py_ssize_t __pyx_t_72; Py_ssize_t __pyx_t_73; Py_ssize_t __pyx_t_74; Py_ssize_t __pyx_t_75; Py_ssize_t __pyx_t_76; Py_ssize_t __pyx_t_77; Py_ssize_t __pyx_t_78; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_79; Py_ssize_t __pyx_t_80; Py_ssize_t __pyx_t_81; Py_ssize_t __pyx_t_82; Py_ssize_t __pyx_t_83; Py_ssize_t __pyx_t_84; Py_ssize_t __pyx_t_85; Py_ssize_t __pyx_t_86; Py_ssize_t __pyx_t_87; Py_ssize_t __pyx_t_88; Py_ssize_t __pyx_t_89; Py_ssize_t __pyx_t_90; Py_ssize_t __pyx_t_91; Py_ssize_t __pyx_t_92; Py_ssize_t __pyx_t_93; Py_ssize_t __pyx_t_94; Py_ssize_t __pyx_t_95; Py_ssize_t __pyx_t_96; Py_ssize_t __pyx_t_97; Py_ssize_t __pyx_t_98; Py_ssize_t __pyx_t_99; Py_ssize_t __pyx_t_100; Py_ssize_t __pyx_t_101; Py_ssize_t __pyx_t_102; Py_ssize_t __pyx_t_103; Py_ssize_t __pyx_t_104; Py_ssize_t __pyx_t_105; Py_ssize_t __pyx_t_106; Py_ssize_t __pyx_t_107; PyObject *(*__pyx_t_108)(PyObject *); PyObject *__pyx_t_109 = NULL; PyObject *(*__pyx_t_110)(PyObject *); Py_ssize_t __pyx_t_111; Py_ssize_t __pyx_t_112; Py_ssize_t __pyx_t_113; Py_ssize_t __pyx_t_114; PyArrayObject *__pyx_t_115 = NULL; PyArrayObject *__pyx_t_116 = NULL; PyArrayObject *__pyx_t_117 = NULL; PyArrayObject *__pyx_t_118 = NULL; PyArrayObject *__pyx_t_119 = NULL; PyArrayObject *__pyx_t_120 = NULL; PyObject *__pyx_t_121 = NULL; Py_ssize_t __pyx_t_122; Py_ssize_t __pyx_t_123; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_124; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_125; Py_ssize_t __pyx_t_126; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_127; Py_ssize_t __pyx_t_128; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_129; __pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_130; Py_ssize_t __pyx_t_131; Py_ssize_t __pyx_t_132; Py_ssize_t __pyx_t_133; Py_ssize_t __pyx_t_134; Py_ssize_t __pyx_t_135; Py_ssize_t __pyx_t_136; Py_ssize_t __pyx_t_137; Py_ssize_t __pyx_t_138; Py_ssize_t __pyx_t_139; Py_ssize_t __pyx_t_140; Py_ssize_t __pyx_t_141; Py_ssize_t __pyx_t_142; Py_ssize_t __pyx_t_143; Py_ssize_t 
__pyx_t_144; Py_ssize_t __pyx_t_145; Py_ssize_t __pyx_t_146; Py_ssize_t __pyx_t_147; Py_ssize_t __pyx_t_148; Py_ssize_t __pyx_t_149; Py_ssize_t __pyx_t_150; Py_ssize_t __pyx_t_151; Py_ssize_t __pyx_t_152; Py_ssize_t __pyx_t_153; Py_ssize_t __pyx_t_154; Py_ssize_t __pyx_t_155; Py_ssize_t __pyx_t_156; Py_ssize_t __pyx_t_157; Py_ssize_t __pyx_t_158; Py_ssize_t __pyx_t_159; Py_ssize_t __pyx_t_160; Py_ssize_t __pyx_t_161; Py_ssize_t __pyx_t_162; Py_ssize_t __pyx_t_163; Py_ssize_t __pyx_t_164; Py_ssize_t __pyx_t_165; Py_ssize_t __pyx_t_166; Py_ssize_t __pyx_t_167; Py_ssize_t __pyx_t_168; Py_ssize_t __pyx_t_169; Py_ssize_t __pyx_t_170; Py_ssize_t __pyx_t_171; Py_ssize_t __pyx_t_172; Py_ssize_t __pyx_t_173; Py_ssize_t __pyx_t_174; Py_ssize_t __pyx_t_175; Py_ssize_t __pyx_t_176; Py_ssize_t __pyx_t_177; Py_ssize_t __pyx_t_178; Py_ssize_t __pyx_t_179; Py_ssize_t __pyx_t_180; Py_ssize_t __pyx_t_181; Py_ssize_t __pyx_t_182; Py_ssize_t __pyx_t_183; Py_ssize_t __pyx_t_184; Py_ssize_t __pyx_t_185; Py_ssize_t __pyx_t_186; Py_ssize_t __pyx_t_187; Py_ssize_t __pyx_t_188; Py_ssize_t __pyx_t_189; Py_ssize_t __pyx_t_190; Py_ssize_t __pyx_t_191; Py_ssize_t __pyx_t_192; Py_ssize_t __pyx_t_193; Py_ssize_t __pyx_t_194; Py_ssize_t __pyx_t_195; Py_ssize_t __pyx_t_196; Py_ssize_t __pyx_t_197; Py_ssize_t __pyx_t_198; Py_ssize_t __pyx_t_199; Py_ssize_t __pyx_t_200; Py_ssize_t __pyx_t_201; Py_ssize_t __pyx_t_202; Py_ssize_t __pyx_t_203; Py_ssize_t __pyx_t_204; Py_ssize_t __pyx_t_205; Py_ssize_t __pyx_t_206; Py_ssize_t __pyx_t_207; Py_ssize_t __pyx_t_208; Py_ssize_t __pyx_t_209; Py_ssize_t __pyx_t_210; Py_ssize_t __pyx_t_211; Py_ssize_t __pyx_t_212; Py_ssize_t __pyx_t_213; Py_ssize_t __pyx_t_214; Py_ssize_t __pyx_t_215; Py_ssize_t __pyx_t_216; Py_ssize_t __pyx_t_217; Py_ssize_t __pyx_t_218; Py_ssize_t __pyx_t_219; Py_ssize_t __pyx_t_220; Py_ssize_t __pyx_t_221; Py_ssize_t __pyx_t_222; Py_ssize_t __pyx_t_223; Py_ssize_t __pyx_t_224; Py_ssize_t __pyx_t_225; Py_ssize_t __pyx_t_226; 
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_227; long __pyx_t_228; long __pyx_t_229; long __pyx_t_230; long __pyx_t_231; Py_ssize_t __pyx_t_232; Py_ssize_t __pyx_t_233; Py_ssize_t __pyx_t_234; Py_ssize_t __pyx_t_235; Py_ssize_t __pyx_t_236; Py_ssize_t __pyx_t_237; Py_ssize_t __pyx_t_238; Py_ssize_t __pyx_t_239; Py_ssize_t __pyx_t_240; Py_ssize_t __pyx_t_241; Py_ssize_t __pyx_t_242; Py_ssize_t __pyx_t_243; Py_ssize_t __pyx_t_244; Py_ssize_t __pyx_t_245; Py_ssize_t __pyx_t_246; Py_ssize_t __pyx_t_247; Py_ssize_t __pyx_t_248; Py_ssize_t __pyx_t_249; Py_ssize_t __pyx_t_250; Py_ssize_t __pyx_t_251; Py_ssize_t __pyx_t_252; Py_ssize_t __pyx_t_253; Py_ssize_t __pyx_t_254; Py_ssize_t __pyx_t_255; Py_ssize_t __pyx_t_256; Py_ssize_t __pyx_t_257; Py_ssize_t __pyx_t_258; Py_ssize_t __pyx_t_259; Py_ssize_t __pyx_t_260; Py_ssize_t __pyx_t_261; Py_ssize_t __pyx_t_262; Py_ssize_t __pyx_t_263; Py_ssize_t __pyx_t_264; Py_ssize_t __pyx_t_265; Py_ssize_t __pyx_t_266; Py_ssize_t __pyx_t_267; Py_ssize_t __pyx_t_268; Py_ssize_t __pyx_t_269; Py_ssize_t __pyx_t_270; Py_ssize_t __pyx_t_271; Py_ssize_t __pyx_t_272; Py_ssize_t __pyx_t_273; Py_ssize_t __pyx_t_274; Py_ssize_t __pyx_t_275; Py_ssize_t __pyx_t_276; Py_ssize_t __pyx_t_277; Py_ssize_t __pyx_t_278; Py_ssize_t __pyx_t_279; Py_ssize_t __pyx_t_280; Py_ssize_t __pyx_t_281; Py_ssize_t __pyx_t_282; Py_ssize_t __pyx_t_283; Py_ssize_t __pyx_t_284; Py_ssize_t __pyx_t_285; Py_ssize_t __pyx_t_286; Py_ssize_t __pyx_t_287; Py_ssize_t __pyx_t_288; Py_ssize_t __pyx_t_289; Py_ssize_t __pyx_t_290; Py_ssize_t __pyx_t_291; __Pyx_RefNannySetupContext("_cost_function", 0); __pyx_pybuffer_displacement.pybuffer.buf = NULL; __pyx_pybuffer_displacement.refcount = 0; __pyx_pybuffernd_displacement.data = NULL; __pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = 
NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_x_guess.pybuffer.buf = NULL; __pyx_pybuffer_x_guess.refcount = 0; __pyx_pybuffernd_x_guess.data = NULL; __pyx_pybuffernd_x_guess.rcbuffer = &__pyx_pybuffer_x_guess; __pyx_pybuffer_y_guess.pybuffer.buf = NULL; __pyx_pybuffer_y_guess.refcount = 0; __pyx_pybuffernd_y_guess.data = NULL; __pyx_pybuffernd_y_guess.rcbuffer = &__pyx_pybuffer_y_guess; __pyx_pybuffer_interp_coef.pybuffer.buf = NULL; __pyx_pybuffer_interp_coef.refcount = 0; __pyx_pybuffernd_interp_coef.data = NULL; __pyx_pybuffernd_interp_coef.rcbuffer = &__pyx_pybuffer_interp_coef; __pyx_pybuffer_l_i.pybuffer.buf = NULL; __pyx_pybuffer_l_i.refcount = 0; __pyx_pybuffernd_l_i.data = NULL; __pyx_pybuffernd_l_i.rcbuffer = &__pyx_pybuffer_l_i; __pyx_pybuffer_m_j.pybuffer.buf = NULL; __pyx_pybuffer_m_j.refcount = 0; __pyx_pybuffernd_m_j.data = NULL; __pyx_pybuffernd_m_j.rcbuffer = &__pyx_pybuffer_m_j; __pyx_pybuffer_i_min.pybuffer.buf = NULL; __pyx_pybuffer_i_min.refcount = 0; __pyx_pybuffernd_i_min.data = NULL; __pyx_pybuffernd_i_min.rcbuffer = &__pyx_pybuffer_i_min; __pyx_pybuffer_i_max.pybuffer.buf = NULL; __pyx_pybuffer_i_max.refcount = 0; __pyx_pybuffernd_i_max.data = NULL; __pyx_pybuffernd_i_max.rcbuffer = &__pyx_pybuffer_i_max; __pyx_pybuffer_j_min.pybuffer.buf = NULL; __pyx_pybuffer_j_min.refcount = 0; __pyx_pybuffernd_j_min.data = NULL; __pyx_pybuffernd_j_min.rcbuffer = &__pyx_pybuffer_j_min; __pyx_pybuffer_j_max.pybuffer.buf = NULL; __pyx_pybuffer_j_max.refcount = 0; __pyx_pybuffernd_j_max.data = NULL; __pyx_pybuffernd_j_max.rcbuffer = &__pyx_pybuffer_j_max; __pyx_pybuffer_morphed_image.pybuffer.buf = NULL; __pyx_pybuffer_morphed_image.refcount = 0; __pyx_pybuffernd_morphed_image.data = NULL; __pyx_pybuffernd_morphed_image.rcbuffer = &__pyx_pybuffer_morphed_image; __pyx_pybuffer_morph_mask.pybuffer.buf = NULL; __pyx_pybuffer_morph_mask.refcount = 0; 
__pyx_pybuffernd_morph_mask.data = NULL; __pyx_pybuffernd_morph_mask.rcbuffer = &__pyx_pybuffer_morph_mask; __pyx_pybuffer__gradient_data.pybuffer.buf = NULL; __pyx_pybuffer__gradient_data.refcount = 0; __pyx_pybuffernd__gradient_data.data = NULL; __pyx_pybuffernd__gradient_data.rcbuffer = &__pyx_pybuffer__gradient_data; __pyx_pybuffer_grad_residuals.pybuffer.buf = NULL; __pyx_pybuffer_grad_residuals.refcount = 0; __pyx_pybuffernd_grad_residuals.data = NULL; __pyx_pybuffernd_grad_residuals.rcbuffer = &__pyx_pybuffer_grad_residuals; __pyx_pybuffer_grad_smooth.pybuffer.buf = NULL; __pyx_pybuffer_grad_smooth.refcount = 0; __pyx_pybuffernd_grad_smooth.data = NULL; __pyx_pybuffernd_grad_smooth.rcbuffer = &__pyx_pybuffer_grad_smooth; __pyx_pybuffer_buffer.pybuffer.buf = NULL; __pyx_pybuffer_buffer.refcount = 0; __pyx_pybuffernd_buffer.data = NULL; __pyx_pybuffernd_buffer.rcbuffer = &__pyx_pybuffer_buffer; __pyx_pybuffer_sector_displacement.pybuffer.buf = NULL; __pyx_pybuffer_sector_displacement.refcount = 0; __pyx_pybuffernd_sector_displacement.data = NULL; __pyx_pybuffernd_sector_displacement.rcbuffer = &__pyx_pybuffer_sector_displacement; __pyx_pybuffer_template_image.pybuffer.buf = NULL; __pyx_pybuffer_template_image.refcount = 0; __pyx_pybuffernd_template_image.data = NULL; __pyx_pybuffernd_template_image.rcbuffer = &__pyx_pybuffer_template_image; __pyx_pybuffer_input_image.pybuffer.buf = NULL; __pyx_pybuffer_input_image.refcount = 0; __pyx_pybuffernd_input_image.data = NULL; __pyx_pybuffernd_input_image.rcbuffer = &__pyx_pybuffer_input_image; __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_sector_displacement, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 
3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_sector_displacement.diminfo[0].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sector_displacement.diminfo[0].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sector_displacement.diminfo[1].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sector_displacement.diminfo[1].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_sector_displacement.diminfo[2].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_sector_displacement.diminfo[2].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[2]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_template_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_template_image.diminfo[0].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_template_image.diminfo[0].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_template_image.diminfo[1].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_template_image.diminfo[1].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_input_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_input_image.diminfo[0].strides = 
__pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_input_image.diminfo[0].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_input_image.diminfo[1].strides = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_input_image.diminfo[1].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error) } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1]; /* "pysteps/motion/_vet.pyx":350 * """ * * cdef intp x_sectors = <intp> sector_displacement.shape[1] # <<<<<<<<<<<<<< * cdef intp y_sectors = <intp> sector_displacement.shape[2] * */ __pyx_v_x_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[1])); /* "pysteps/motion/_vet.pyx":351 * * cdef intp x_sectors = <intp> sector_displacement.shape[1] * cdef intp y_sectors = <intp> sector_displacement.shape[2] # <<<<<<<<<<<<<< * * cdef intp x_image_size = <intp> template_image.shape[0] */ __pyx_v_y_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[2])); /* "pysteps/motion/_vet.pyx":353 * cdef intp y_sectors = <intp> sector_displacement.shape[2] * * cdef intp x_image_size = <intp> template_image.shape[0] # <<<<<<<<<<<<<< * cdef intp y_image_size = <intp> template_image.shape[1] * */ __pyx_v_x_image_size = 
((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[0])); /* "pysteps/motion/_vet.pyx":354 * * cdef intp x_image_size = <intp> template_image.shape[0] * cdef intp y_image_size = <intp> template_image.shape[1] # <<<<<<<<<<<<<< * * if x_image_size % x_sectors != 0: */ __pyx_v_y_image_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[1])); /* "pysteps/motion/_vet.pyx":356 * cdef intp y_image_size = <intp> template_image.shape[1] * * if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in x axis (axis=0)" */ __pyx_t_1 = (((__pyx_v_x_image_size % __pyx_v_x_sectors) != 0) != 0); if (unlikely(__pyx_t_1)) { /* "pysteps/motion/_vet.pyx":357 * * if x_image_size % x_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in x axis (axis=0)" * + " don't divide the image size") */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 357, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 357, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":356 * cdef intp y_image_size = <intp> template_image.shape[1] * * if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in x axis (axis=0)" */ } /* "pysteps/motion/_vet.pyx":361 * + " don't divide the image size") * * if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in y axis (axis=1) don't" */ __pyx_t_1 = (((__pyx_v_y_image_size % __pyx_v_y_sectors) != 0) != 0); if (unlikely(__pyx_t_1)) { /* "pysteps/motion/_vet.pyx":362 * * if y_image_size % y_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in y axis 
(axis=1) don't" * + " divide the image size") */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 362, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":361 * + " don't divide the image size") * * if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<< * raise ValueError("Error computing cost function.\n", * "The number of sectors in y axis (axis=1) don't" */ } /* "pysteps/motion/_vet.pyx":367 * * cdef intp x_sector_size = ( * <intp> (round(x_image_size / x_sectors))) # <<<<<<<<<<<<<< * * cdef intp y_sector_size = ( */ __pyx_v_x_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_x_image_size / __pyx_v_x_sectors))); /* "pysteps/motion/_vet.pyx":370 * * cdef intp y_sector_size = ( * <intp> (round(y_image_size / y_sectors))) # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 3] displacement = ( */ __pyx_v_y_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_y_image_size / __pyx_v_y_sectors))); /* "pysteps/motion/_vet.pyx":373 * * cdef np.ndarray[float64, ndim = 3] displacement = ( * np.zeros([2, x_image_size, y_image_size], dtype=np.float64)) # <<<<<<<<<<<<<< * * cdef intp i, j, xy, l, m, ll, mm, i_sec, j_sec */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 373, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { __pyx_v_displacement = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); 
__pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 372, __pyx_L1_error) } else {__pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2]; } } __pyx_t_7 = 0; __pyx_v_displacement = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":378 * cdef intp l0, m0, l1, m1, i_shift, j_shift, axis * * i_shift = (x_sector_size // 2) # <<<<<<<<<<<<<< * j_shift = (y_sector_size // 2) * */ __pyx_v_i_shift = (__pyx_v_x_sector_size / 2); /* "pysteps/motion/_vet.pyx":379 * * i_shift = (x_sector_size // 2) * j_shift = (y_sector_size // 2) # <<<<<<<<<<<<<< * * #Assume regular grid with constant grid spacing. 
*/ __pyx_v_j_shift = (__pyx_v_y_sector_size / 2); /* "pysteps/motion/_vet.pyx":385 * cdef np.ndarray[float64, ndim = 1] x * cdef np.ndarray[float64, ndim = 1] y * x = np.arange(x_image_size, dtype='float64') # <<<<<<<<<<<<<< * y = np.arange(y_image_size, dtype='float64') * */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_arange); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 385, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 385, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { 
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 385, __pyx_L1_error) } __pyx_t_8 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":386 * cdef np.ndarray[float64, ndim = 1] y * x = np.arange(x_image_size, dtype='float64') * y = np.arange(y_image_size, dtype='float64') # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 1] x_guess */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_arange); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 386, __pyx_L1_error) __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, 
__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 386, __pyx_L1_error) __pyx_t_13 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 386, __pyx_L1_error) } __pyx_t_13 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":391 * cdef np.ndarray[float64, ndim = 1] y_guess * * x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1) # <<<<<<<<<<<<<< * y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1) * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_x), __pyx_n_s_reshape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
__Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_5 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_6, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_mean); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 391, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 391, __pyx_L1_error) 
__pyx_t_14 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_x_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_x_guess.diminfo[0].strides = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_guess.diminfo[0].shape = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 391, __pyx_L1_error) } __pyx_t_14 = 0; __pyx_v_x_guess = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":392 * * x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1) * y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1) # <<<<<<<<<<<<<< * * cdef float64 sector_area */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_y), __pyx_n_s_reshape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_6); __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mean); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 392, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 392, __pyx_L1_error) __pyx_t_15 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, 
__pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_y_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_y_guess.diminfo[0].strides = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y_guess.diminfo[0].shape = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 392, __pyx_L1_error) } __pyx_t_15 = 0; __pyx_v_y_guess = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":398 * cdef np.ndarray[float64, ndim = 3] interp_coef * * interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyList_SET_ITEM(__pyx_t_6, 0, __pyx_int_4); 
__Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 398, __pyx_L1_error) __pyx_t_16 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_v_interp_coef, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_interp_coef.diminfo[0].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_interp_coef.diminfo[0].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_interp_coef.diminfo[1].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_interp_coef.diminfo[1].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_interp_coef.diminfo[2].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_interp_coef.diminfo[2].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 398, __pyx_L1_error) } __pyx_t_16 = 0; __pyx_v_interp_coef = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":400 * interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64) * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) # <<<<<<<<<<<<<< * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 400, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 400, __pyx_L1_error) __pyx_t_17 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_l_i = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 400, __pyx_L1_error) } else {__pyx_pybuffernd_l_i.diminfo[0].strides = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_l_i.diminfo[0].shape = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.shape[0]; } } __pyx_t_17 = 0; __pyx_v_l_i = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":401 * * cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) * cdef np.ndarray[intp, ndim = 1] 
m_j = np.zeros(y_image_size, dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 401, __pyx_L1_error) __pyx_t_18 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, 
&__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_m_j = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 401, __pyx_L1_error) } else {__pyx_pybuffernd_m_j.diminfo[0].strides = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m_j.diminfo[0].shape = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.shape[0]; } } __pyx_t_18 = 0; __pyx_v_m_j = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "pysteps/motion/_vet.pyx":404 * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, * x_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); 
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":405 * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, * x_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":403 * cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 403, __pyx_L1_error) __pyx_t_19 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_i_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 403, __pyx_L1_error) } else 
{__pyx_pybuffernd_i_min.diminfo[0].strides = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_min.diminfo[0].shape = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.shape[0]; } } __pyx_t_19 = 0; __pyx_v_i_min = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "pysteps/motion/_vet.pyx":408 * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, * x_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_6 = 0; __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":409 * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, * x_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":407 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<< * x_image_size, * dtype=np.intp) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 407, __pyx_L1_error) __pyx_t_20 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_i_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 407, __pyx_L1_error) } else {__pyx_pybuffernd_i_max.diminfo[0].strides = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_max.diminfo[0].shape = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.shape[0]; } } __pyx_t_20 = 0; __pyx_v_i_max = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ 
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "pysteps/motion/_vet.pyx":412 * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, * y_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":413 * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, * y_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, */ __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 413, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":411 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 411, __pyx_L1_error) __pyx_t_21 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_j_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 411, __pyx_L1_error) } else {__pyx_pybuffernd_j_min.diminfo[0].strides = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_min.diminfo[0].shape = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.shape[0]; } } __pyx_t_21 = 0; __pyx_v_j_min = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_6); /* "pysteps/motion/_vet.pyx":416 * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, * y_image_size, # <<<<<<<<<<<<<< * dtype=np.intp) * */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":417 * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, * y_image_size, * dtype=np.intp) # <<<<<<<<<<<<<< * * for i in prange(x_image_size, schedule='dynamic', nogil=True): */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":415 * dtype=np.intp) * * cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<< * y_image_size, * dtype=np.intp) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); 
__pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 415, __pyx_L1_error) __pyx_t_22 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_j_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 415, __pyx_L1_error) } else {__pyx_pybuffernd_j_max.diminfo[0].strides = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_max.diminfo[0].shape = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.shape[0]; } } __pyx_t_22 = 0; __pyx_v_j_max = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":419 * dtype=np.intp) * * for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_23 = __pyx_v_x_image_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_25 = (__pyx_t_23 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_25 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_100, __pyx_t_101, __pyx_t_102, __pyx_t_103, __pyx_t_104, __pyx_t_105, __pyx_t_106, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, 
__pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75, __pyx_t_76, __pyx_t_77, __pyx_t_78, __pyx_t_79, __pyx_t_80, __pyx_t_81, __pyx_t_82, __pyx_t_83, __pyx_t_84, __pyx_t_85, __pyx_t_86, __pyx_t_87, __pyx_t_88, __pyx_t_89, __pyx_t_90, __pyx_t_91, __pyx_t_92, __pyx_t_93, __pyx_t_94, __pyx_t_95, __pyx_t_96, __pyx_t_97, __pyx_t_98, __pyx_t_99) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_l0) lastprivate(__pyx_v_l1) lastprivate(__pyx_v_m0) lastprivate(__pyx_v_m1) lastprivate(__pyx_v_sector_area) lastprivate(__pyx_v_xy) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_24 = 0; __pyx_t_24 < __pyx_t_25; __pyx_t_24++){ { __pyx_v_i = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_24); /* Initialize private variables to invalid values */ __pyx_v_j = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_l0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_l1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_m0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_m1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); __pyx_v_sector_area = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN()); __pyx_v_xy = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0); /* "pysteps/motion/_vet.pyx":421 * for i in prange(x_image_size, schedule='dynamic', nogil=True): * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) # <<<<<<<<<<<<<< * l0 = int_max(l0, 0) * l1 = l0 + 1 */ __pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_i - __pyx_v_i_shift) / __pyx_v_x_sector_size), (__pyx_v_x_sectors - 2)); /* "pysteps/motion/_vet.pyx":422 * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 
2) * l0 = int_max(l0, 0) # <<<<<<<<<<<<<< * l1 = l0 + 1 * */ __pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_l0, 0); /* "pysteps/motion/_vet.pyx":423 * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) * l0 = int_max(l0, 0) * l1 = l0 + 1 # <<<<<<<<<<<<<< * * l_i[i] = l0 */ __pyx_v_l1 = (__pyx_v_l0 + 1); /* "pysteps/motion/_vet.pyx":425 * l1 = l0 + 1 * * l_i[i] = l0 # <<<<<<<<<<<<<< * * for j in range(y_image_size): */ __pyx_t_26 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_l_i.diminfo[0].strides) = __pyx_v_l0; /* "pysteps/motion/_vet.pyx":427 * l_i[i] = l0 * * for j in range(y_image_size): # <<<<<<<<<<<<<< * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) */ __pyx_t_27 = __pyx_v_y_image_size; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_j = __pyx_t_29; /* "pysteps/motion/_vet.pyx":428 * * for j in range(y_image_size): * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) # <<<<<<<<<<<<<< * m0 = int_max(m0, 0) * m1 = m0 + 1 */ __pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_j - __pyx_v_j_shift) / __pyx_v_y_sector_size), (__pyx_v_y_sectors - 2)); /* "pysteps/motion/_vet.pyx":429 * for j in range(y_image_size): * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) # <<<<<<<<<<<<<< * m1 = m0 + 1 * */ __pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_m0, 0); /* "pysteps/motion/_vet.pyx":430 * m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) * m0 = int_max(m0, 0) * m1 = m0 + 1 # <<<<<<<<<<<<<< * * m_j[j] = m0 */ __pyx_v_m1 = (__pyx_v_m0 + 1); /* "pysteps/motion/_vet.pyx":432 * m1 = m0 + 1 * * m_j[j] = m0 # <<<<<<<<<<<<<< * * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) */ __pyx_t_30 = __pyx_v_j; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, 
__pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_m_j.diminfo[0].strides) = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":434 * m_j[j] = m0 * * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) # <<<<<<<<<<<<<< * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] */ __pyx_t_31 = __pyx_v_l1; __pyx_t_32 = __pyx_v_l0; __pyx_t_33 = __pyx_v_m1; __pyx_t_34 = __pyx_v_m0; __pyx_v_sector_area = (((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_x_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_y_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_y_guess.diminfo[0].strides)))); /* "pysteps/motion/_vet.pyx":436 * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<< * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] */ __pyx_t_35 = __pyx_v_l1; __pyx_t_36 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":437 * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] * - x[i] * y_guess[m1] # <<<<<<<<<<<<<< * - x_guess[l1] * y[j] * + x[i] * y[j]) / sector_area */ __pyx_t_37 = __pyx_v_i; __pyx_t_38 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":438 * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] # <<<<<<<<<<<<<< * + x[i] * y[j]) / sector_area * */ __pyx_t_39 = __pyx_v_l1; __pyx_t_40 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":439 * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] * + x[i] * y[j]) / sector_area # 
<<<<<<<<<<<<<< * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] */ __pyx_t_41 = __pyx_v_i; __pyx_t_42 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":436 * sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) * * interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<< * - x[i] * y_guess[m1] * - x_guess[l1] * y[j] */ __pyx_t_43 = 0; __pyx_t_44 = __pyx_v_i; __pyx_t_45 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_44, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_45, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_35, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_39, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":441 * + x[i] * y[j]) / sector_area * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<< * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] */ __pyx_t_46 = __pyx_v_l1; __pyx_t_47 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":442 * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] * + x[i] * y_guess[m0] # <<<<<<<<<<<<<< * + x_guess[l1] * y[j] * - x[i] * y[j]) / sector_area */ __pyx_t_48 = __pyx_v_i; __pyx_t_49 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":443 * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] # <<<<<<<<<<<<<< * - x[i] * y[j]) / sector_area * */ __pyx_t_50 = __pyx_v_l1; __pyx_t_51 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":444 * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] * - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] */ __pyx_t_52 = __pyx_v_i; __pyx_t_53 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":441 * + x[i] * y[j]) / sector_area * * interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<< * + x[i] * y_guess[m0] * + x_guess[l1] * y[j] */ __pyx_t_54 = 1; __pyx_t_55 = __pyx_v_i; __pyx_t_56 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_54, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_55, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_56, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_47, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + 
((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_51, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":446 * - x[i] * y[j]) / sector_area * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<< * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] */ __pyx_t_57 = __pyx_v_l0; __pyx_t_58 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":447 * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] * + x[i] * y_guess[m1] # <<<<<<<<<<<<<< * + x_guess[l0] * y[j] * - x[i] * y[j]) / sector_area */ __pyx_t_59 = __pyx_v_i; __pyx_t_60 = __pyx_v_m1; /* "pysteps/motion/_vet.pyx":448 * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] # <<<<<<<<<<<<<< * - x[i] * y[j]) / sector_area * */ __pyx_t_61 = __pyx_v_l0; __pyx_t_62 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":449 * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] * - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] */ __pyx_t_63 = __pyx_v_i; __pyx_t_64 = __pyx_v_j; /* 
"pysteps/motion/_vet.pyx":446 * - x[i] * y[j]) / sector_area * * interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<< * + x[i] * y_guess[m1] * + x_guess[l0] * y[j] */ __pyx_t_65 = 2; __pyx_t_66 = __pyx_v_i; __pyx_t_67 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_65, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_66, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_67, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_59, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_61, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_63, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":451 * - x[i] * y[j]) / 
sector_area * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<< * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] */ __pyx_t_68 = __pyx_v_l0; __pyx_t_69 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":452 * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] * - x[i] * y_guess[m0] # <<<<<<<<<<<<<< * - x_guess[l0] * y[j] * + x[i] * y[j]) / sector_area */ __pyx_t_70 = __pyx_v_i; __pyx_t_71 = __pyx_v_m0; /* "pysteps/motion/_vet.pyx":453 * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] # <<<<<<<<<<<<<< * + x[i] * y[j]) / sector_area * */ __pyx_t_72 = __pyx_v_l0; __pyx_t_73 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":454 * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] * + x[i] * y[j]) / sector_area # <<<<<<<<<<<<<< * * for xy in range(2): */ __pyx_t_74 = __pyx_v_i; __pyx_t_75 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":451 * - x[i] * y[j]) / sector_area * * interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<< * - x[i] * y_guess[m0] * - x_guess[l0] * y[j] */ __pyx_t_76 = 3; __pyx_t_77 = __pyx_v_i; __pyx_t_78 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_76, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_77, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_78, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_69, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_71, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_73, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_75, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area); /* "pysteps/motion/_vet.pyx":456 * + x[i] * y[j]) / sector_area * * for xy in range(2): # <<<<<<<<<<<<<< * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] */ for (__pyx_t_79 = 0; __pyx_t_79 < 2; __pyx_t_79+=1) { __pyx_v_xy = __pyx_t_79; /* "pysteps/motion/_vet.pyx":458 * for xy in range(2): * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] */ __pyx_t_80 = __pyx_v_xy; __pyx_t_81 = __pyx_v_l0; __pyx_t_82 = __pyx_v_m0; __pyx_t_83 = 0; __pyx_t_84 = __pyx_v_i; __pyx_t_85 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":459 * displacement[xy, i, j] = ( * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] */ __pyx_t_86 = __pyx_v_xy; __pyx_t_87 = __pyx_v_l0; __pyx_t_88 = __pyx_v_m1; __pyx_t_89 = 1; __pyx_t_90 = __pyx_v_i; __pyx_t_91 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":460 * 
sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] # <<<<<<<<<<<<<< * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] * ) */ __pyx_t_92 = __pyx_v_xy; __pyx_t_93 = __pyx_v_l1; __pyx_t_94 = __pyx_v_m0; __pyx_t_95 = 2; __pyx_t_96 = __pyx_v_i; __pyx_t_97 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":461 * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] * + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] * + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] # <<<<<<<<<<<<<< * ) * */ __pyx_t_98 = __pyx_v_xy; __pyx_t_99 = __pyx_v_l1; __pyx_t_100 = __pyx_v_m1; __pyx_t_101 = 3; __pyx_t_102 = __pyx_v_i; __pyx_t_103 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":457 * * for xy in range(2): * displacement[xy, i, j] = ( # <<<<<<<<<<<<<< * sector_displacement[xy, l0, m0] * interp_coef[0, i, j] * + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] */ __pyx_t_104 = __pyx_v_xy; __pyx_t_105 = __pyx_v_i; __pyx_t_106 = __pyx_v_j; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_104, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_105, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_106, __pyx_pybuffernd_displacement.diminfo[2].strides) = (((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_82, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_83, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_84, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_85, 
__pyx_pybuffernd_interp_coef.diminfo[2].strides))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_86, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_87, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_88, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_89, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_90, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_91, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_92, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_93, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_94, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_95, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_96, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_97, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_98, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_99, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_100, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_101, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_102, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_103, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } } 
} } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "pysteps/motion/_vet.pyx":419 * dtype=np.intp) * * for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L7; } __pyx_L7:; } } /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_unique); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_l_i)); __Pyx_GIVEREF(((PyObject *)__pyx_v_l_i)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_l_i)); /* "pysteps/motion/_vet.pyx":465 * * for l, i, counts in zip(*np.unique(l_i, * return_index=True, # <<<<<<<<<<<<<< * return_counts=True)): * i_min[l] = i */ __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 465, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 465, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":466 * for l, i, counts in zip(*np.unique(l_i, * return_index=True, * return_counts=True)): # <<<<<<<<<<<<<< * i_min[l] = i * i_max[l] = i + counts - 1 */ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 465, 
__pyx_L1_error) /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(PyList_CheckExact(__pyx_t_4)) || PyTuple_CheckExact(__pyx_t_4)) { __pyx_t_3 = __pyx_t_4; __Pyx_INCREF(__pyx_t_3); __pyx_t_107 = 0; __pyx_t_108 = NULL; } else { __pyx_t_107 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_108 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 464, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; for (;;) { if (likely(!__pyx_t_108)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if 
(unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_108(__pyx_t_3); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 464, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { PyObject* sequence = __pyx_t_4; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 464, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_5 = PyList_GET_ITEM(sequence, 1); __pyx_t_6 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { Py_ssize_t index = -1; __pyx_t_109 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_109); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext; index = 0; 
__pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_5 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_5)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 2; __pyx_t_6 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_6)) goto __pyx_L20_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 464, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; goto __pyx_L21_unpacking_done; __pyx_L20_unpacking_failed:; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 464, __pyx_L1_error) __pyx_L21_unpacking_done:; } __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_5); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_l = __pyx_t_25; __pyx_v_i = __pyx_t_24; __Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":467 * return_index=True, * return_counts=True)): * i_min[l] = i # <<<<<<<<<<<<<< * i_max[l] = i + counts - 1 * */ __pyx_t_111 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_111, __pyx_pybuffernd_i_min.diminfo[0].strides) = __pyx_v_i; /* "pysteps/motion/_vet.pyx":468 * return_counts=True)): * i_min[l] = i * i_max[l] = i + counts - 1 # <<<<<<<<<<<<<< * * for m, j, counts in zip(*np.unique(m_j, */ __pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_4, 
__pyx_v_counts); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_t_6, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_112 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_112, __pyx_pybuffernd_i_max.diminfo[0].strides) = __pyx_t_24; /* "pysteps/motion/_vet.pyx":464 * ) * * for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_unique); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_m_j)); __Pyx_GIVEREF(((PyObject *)__pyx_v_m_j)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_m_j)); /* "pysteps/motion/_vet.pyx":471 * * for m, j, counts in zip(*np.unique(m_j, * return_index=True, # <<<<<<<<<<<<<< * return_counts=True)): * j_min[m] = j */ __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, 
__pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":472 * for m, j, counts in zip(*np.unique(m_j, * return_index=True, * return_counts=True)): # <<<<<<<<<<<<<< * j_min[m] = j * j_max[m] = j + counts */ if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PySequence_Tuple(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) { __pyx_t_6 = __pyx_t_5; __Pyx_INCREF(__pyx_t_6); __pyx_t_107 = 0; __pyx_t_108 = NULL; } else { __pyx_t_107 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_108 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 470, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (;;) { if (likely(!__pyx_t_108)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error) #else __pyx_t_5 = 
PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_108(__pyx_t_6); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 470, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 470, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_3 = PyList_GET_ITEM(sequence, 0); __pyx_t_4 = PyList_GET_ITEM(sequence, 1); __pyx_t_2 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_2); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; __pyx_t_109 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_109); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_3)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_4)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 2; __pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L24_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 470, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; goto __pyx_L25_unpacking_done; __pyx_L24_unpacking_failed:; __Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 470, __pyx_L1_error) __pyx_L25_unpacking_done:; } __pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_3); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_m = __pyx_t_24; __pyx_v_j = __pyx_t_25; __Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":473 * return_index=True, * return_counts=True)): * j_min[m] = j # <<<<<<<<<<<<<< * j_max[m] = j + counts * */ __pyx_t_113 = __pyx_v_m; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_113, __pyx_pybuffernd_j_min.diminfo[0].strides) 
= __pyx_v_j; /* "pysteps/motion/_vet.pyx":474 * return_counts=True)): * j_min[m] = j * j_max[m] = j + counts # <<<<<<<<<<<<<< * * cdef np.ndarray[float64, ndim = 2] morphed_image */ __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_j); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyNumber_Add(__pyx_t_5, __pyx_v_counts); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_114 = __pyx_v_m; *__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_114, __pyx_pybuffernd_j_max.diminfo[0].strides) = __pyx_t_25; /* "pysteps/motion/_vet.pyx":470 * i_max[l] = i + counts - 1 * * for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<< * return_index=True, * return_counts=True)): */ } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":483 * * cdef np.ndarray[float64, ndim = 2] buffer = \ * np.zeros([x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<< * * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyList_New(2); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 483, __pyx_L1_error) __pyx_t_115 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_buffer = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_buffer.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 482, __pyx_L1_error) } else {__pyx_pybuffernd_buffer.diminfo[0].strides = 
__pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1]; } } __pyx_t_115 = 0; __pyx_v_buffer = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":485 * np.zeros([x_image_size, y_image_size], dtype=np.float64) * * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<< * * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 485, __pyx_L1_error) __pyx_t_116 = ((PyArrayObject *)__pyx_t_6); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; 
__pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 485, __pyx_L1_error) } __pyx_t_116 = 0; __pyx_v_grad_smooth = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0; /* "pysteps/motion/_vet.pyx":487 * grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) * * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<< * * cdef float64 residuals = 0 */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_5); __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 487, __pyx_L1_error) __pyx_t_117 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_t_117, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_residuals, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { 
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_grad_residuals.diminfo[0].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_residuals.diminfo[0].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_residuals.diminfo[1].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_residuals.diminfo[1].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_residuals.diminfo[2].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_residuals.diminfo[2].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 487, __pyx_L1_error) } __pyx_t_117 = 0; __pyx_v_grad_residuals = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":489 * grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) * * cdef float64 residuals = 0 # <<<<<<<<<<<<<< * * # Compute residual part of the cost function */ __pyx_v_residuals = 0.0; /* "pysteps/motion/_vet.pyx":492 * * # Compute residual part of the cost function * if gradient: # <<<<<<<<<<<<<< * * morphed_image, morph_mask, _gradient_data = _warp(template_image, */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":494 * if gradient: * * morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":496 * morphed_image, morph_mask, _gradient_data = _warp(template_image, * mask, * displacement, # <<<<<<<<<<<<<< * gradient=True) * */ __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__Pyx_INCREF(((PyObject *)__pyx_v_template_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_template_image)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_template_image)); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_displacement)); __Pyx_GIVEREF(((PyObject *)__pyx_v_displacement)); PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_displacement)); /* "pysteps/motion/_vet.pyx":497 * mask, * displacement, * gradient=True) # <<<<<<<<<<<<<< * * morph_mask[mask > 0] = 1 */ __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_gradient, Py_True) < 0) __PYX_ERR(0, 497, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":494 * if gradient: * * morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 494, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_5 = PyList_GET_ITEM(sequence, 0); __pyx_t_4 = PyList_GET_ITEM(sequence, 1); __pyx_t_3 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_5); 
__Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_6)->tp_iternext; index = 0; __pyx_t_5 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_4)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 2; __pyx_t_3 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L27_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_6), 3) < 0) __PYX_ERR(0, 494, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L28_unpacking_done; __pyx_L27_unpacking_failed:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 494, __pyx_L1_error) __pyx_L28_unpacking_done:; } if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error) __pyx_t_118 = ((PyArrayObject *)__pyx_t_5); { 
__Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_118 = 0; __pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_t_119 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_119 = 0; __pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_t_120 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_t_120, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_v__gradient_data, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd__gradient_data.diminfo[0].strides = 
__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__gradient_data.diminfo[0].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__gradient_data.diminfo[1].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__gradient_data.diminfo[1].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd__gradient_data.diminfo[2].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd__gradient_data.diminfo[2].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error) } __pyx_t_120 = 0; __pyx_v__gradient_data = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":499 * gradient=True) * * morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<< * * buffer = (2 * (input_image - morphed_image)) */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 499, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_2, __pyx_int_1) < 0)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":501 * morph_mask[mask > 0] = 1 * * buffer = (2 * (input_image - morphed_image)) # <<<<<<<<<<<<<< * buffer[morph_mask == 1] = 0 * */ __pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_input_image), ((PyObject *)__pyx_v_morphed_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 501, __pyx_L1_error) __pyx_t_115 = ((PyArrayObject *)__pyx_t_3); { 
__Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_v_buffer, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_buffer.diminfo[0].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 501, __pyx_L1_error) } __pyx_t_115 = 0; __Pyx_DECREF_SET(__pyx_v_buffer, ((PyArrayObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":502 * * buffer = (2 * (input_image - morphed_image)) * buffer[morph_mask == 1] = 0 # <<<<<<<<<<<<<< * * _gradient_data[0, :] *= buffer */ __pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 502, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_buffer), __pyx_t_3, __pyx_int_0) < 0)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "pysteps/motion/_vet.pyx":504 * buffer[morph_mask == 1] = 0 * * _gradient_data[0, :] *= 
buffer # <<<<<<<<<<<<<< * _gradient_data[1, :] *= buffer * */ __Pyx_INCREF(__pyx_tuple__4); __pyx_t_121 = __pyx_tuple__4; __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyNumber_InPlaceMultiply(__pyx_t_3, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_2) < 0)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0; /* "pysteps/motion/_vet.pyx":505 * * _gradient_data[0, :] *= buffer * _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<< * * for l in range(x_sectors): # schedule='dynamic', nogil=True): */ __Pyx_INCREF(__pyx_tuple__5); __pyx_t_121 = __pyx_tuple__5; __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_3) < 0)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0; /* "pysteps/motion/_vet.pyx":507 * _gradient_data[1, :] *= buffer * * for l in range(x_sectors): # schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): */ __pyx_t_25 = __pyx_v_x_sectors; __pyx_t_24 = __pyx_t_25; for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) { __pyx_v_l = __pyx_t_23; /* "pysteps/motion/_vet.pyx":508 * * for l in range(x_sectors): # 
schedule='dynamic', nogil=True): * for m in range(y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":509 * for l in range(x_sectors): # schedule='dynamic', nogil=True): * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<< * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ */ __pyx_t_122 = __pyx_v_l; __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_122, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_123 = __pyx_v_l; __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_123, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":510 * for m in range(y_sectors): * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] */ __pyx_t_126 = __pyx_v_m; __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_126, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_128 = __pyx_v_m; __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_128, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":511 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = 
grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) */ __pyx_t_131 = 0; __pyx_t_132 = __pyx_v_l; __pyx_t_133 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":512 * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[0, i, j]) * */ __pyx_t_134 = 0; __pyx_t_135 = __pyx_v_i; __pyx_t_136 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":513 * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) # <<<<<<<<<<<<<< * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ */ __pyx_t_137 = 0; __pyx_t_138 = __pyx_v_i; __pyx_t_139 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":511 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[0, i, j]) */ __pyx_t_140 = 0; __pyx_t_141 = __pyx_v_l; __pyx_t_142 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_140, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_141, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_142, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_131, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_132, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_133, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_134, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_135, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_136, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * 
(*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_137, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_138, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_139, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); /* "pysteps/motion/_vet.pyx":515 * * interp_coef[0, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) */ __pyx_t_143 = 1; __pyx_t_144 = __pyx_v_l; __pyx_t_145 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":516 * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[0, i, j]) * */ __pyx_t_146 = 1; __pyx_t_147 = __pyx_v_i; __pyx_t_148 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":517 * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) # <<<<<<<<<<<<<< * * for m in range(1, y_sectors): */ __pyx_t_149 = 0; __pyx_t_150 = __pyx_v_i; __pyx_t_151 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":515 * * interp_coef[0, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] * * interp_coef[0, i, j]) */ __pyx_t_152 = 1; __pyx_t_153 = __pyx_v_l; __pyx_t_154 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_152, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_153, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_154, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_143, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_144, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_145, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + 
((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_146, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_147, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_148, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_149, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_150, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_151, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } /* "pysteps/motion/_vet.pyx":519 * * interp_coef[0, i, j]) * * for m in range(1, y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":520 * * for m in range(1, y_sectors): * for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<< * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ */ __pyx_t_155 = __pyx_v_l; __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_155, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_156 = __pyx_v_l; __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_156, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":521 * for m in range(1, y_sectors): * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] */ __pyx_t_157 = (__pyx_v_m - 
1); __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_157, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_158 = (__pyx_v_m - 1); __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_158, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":522 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) */ __pyx_t_159 = 0; __pyx_t_160 = __pyx_v_l; __pyx_t_161 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":523 * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[1, i, j]) * */ __pyx_t_162 = 0; __pyx_t_163 = __pyx_v_i; __pyx_t_164 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":524 * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) # <<<<<<<<<<<<<< * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ */ __pyx_t_165 = 1; __pyx_t_166 = __pyx_v_i; __pyx_t_167 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":522 * for i in range(i_min[l], i_max[l]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[0, i, j] * * interp_coef[1, i, j]) */ __pyx_t_168 = 0; __pyx_t_169 = __pyx_v_l; __pyx_t_170 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_168, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_169, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_170, 
__pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_159, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_160, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_161, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_162, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_163, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_164, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_165, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_166, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_167, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); /* "pysteps/motion/_vet.pyx":526 * * interp_coef[1, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] # TODO: Check this line! * * interp_coef[1, i, j]) */ __pyx_t_171 = 1; __pyx_t_172 = __pyx_v_l; __pyx_t_173 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":527 * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # TODO: Check this line! # <<<<<<<<<<<<<< * * interp_coef[1, i, j]) * */ __pyx_t_174 = 1; __pyx_t_175 = __pyx_v_i; __pyx_t_176 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":528 * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ * (_gradient_data[1, i, j] # TODO: Check this line! 
* * interp_coef[1, i, j]) # <<<<<<<<<<<<<< * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): */ __pyx_t_177 = 1; __pyx_t_178 = __pyx_v_i; __pyx_t_179 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":526 * * interp_coef[1, i, j]) * * grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<< * (_gradient_data[1, i, j] # TODO: Check this line! * * interp_coef[1, i, j]) */ __pyx_t_180 = 1; __pyx_t_181 = __pyx_v_l; __pyx_t_182 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_180, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_181, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_182, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_171, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_172, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_173, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_174, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_175, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_176, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_177, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_178, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_179, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))); } } } } /* "pysteps/motion/_vet.pyx":530 * * interp_coef[1, i, j]) * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): */ __pyx_t_25 = __pyx_v_x_sectors; 
__pyx_t_24 = __pyx_t_25; for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) { __pyx_v_l = __pyx_t_23; /* "pysteps/motion/_vet.pyx":531 * * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): * for m in range(y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":532 * for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<< * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] */ __pyx_t_183 = (__pyx_v_l - 1); __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_183, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_184 = (__pyx_v_l - 1); __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_184, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":533 * for m in range(y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) */ __pyx_t_185 = __pyx_v_m; __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_185, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_186 = __pyx_v_m; __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_186, __pyx_pybuffernd_j_min.diminfo[0].strides)); 
__pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":534 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_187 = 0; __pyx_t_188 = __pyx_v_i; __pyx_t_189 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":535 * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) # <<<<<<<<<<<<<< * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[2, i, j]) */ __pyx_t_190 = 2; __pyx_t_191 = __pyx_v_i; __pyx_t_192 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":534 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m], j_max[m]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_193 = 0; __pyx_t_194 = __pyx_v_l; __pyx_t_195 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_193, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_194, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_195, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_187, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_188, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_189, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_190, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_191, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_192, 
__pyx_pybuffernd_interp_coef.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":536 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * */ __pyx_t_196 = 1; __pyx_t_197 = __pyx_v_i; __pyx_t_198 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":537 * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[2, i, j]) # <<<<<<<<<<<<<< * * for m in range(1, y_sectors): */ __pyx_t_199 = 2; __pyx_t_200 = __pyx_v_i; __pyx_t_201 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":536 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[2, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[2, i, j]) * */ __pyx_t_202 = 1; __pyx_t_203 = __pyx_v_l; __pyx_t_204 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_202, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_203, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_204, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_196, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_197, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_198, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_199, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_200, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_201, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); } } } /* "pysteps/motion/_vet.pyx":539 * * interp_coef[2, i, j]) * * for m in range(1, y_sectors): # <<<<<<<<<<<<<< * for i in range(i_min[l - 1], i_max[l - 1]): * for j in 
range(j_min[m - 1], j_max[m - 1]): */ __pyx_t_27 = __pyx_v_y_sectors; __pyx_t_28 = __pyx_t_27; for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) { __pyx_v_m = __pyx_t_29; /* "pysteps/motion/_vet.pyx":540 * * for m in range(1, y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<< * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] */ __pyx_t_205 = (__pyx_v_l - 1); __pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_205, __pyx_pybuffernd_i_max.diminfo[0].strides)); __pyx_t_206 = (__pyx_v_l - 1); __pyx_t_124 = __pyx_t_79; for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_206, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) { __pyx_v_i = __pyx_t_125; /* "pysteps/motion/_vet.pyx":541 * for m in range(1, y_sectors): * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<< * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) */ __pyx_t_207 = (__pyx_v_m - 1); __pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_207, __pyx_pybuffernd_j_max.diminfo[0].strides)); __pyx_t_208 = (__pyx_v_m - 1); __pyx_t_129 = __pyx_t_127; for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_208, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) { __pyx_v_j = __pyx_t_130; /* "pysteps/motion/_vet.pyx":542 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, 
j] */ __pyx_t_209 = 0; __pyx_t_210 = __pyx_v_i; __pyx_t_211 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":543 * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) # <<<<<<<<<<<<<< * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[3, i, j]) */ __pyx_t_212 = 3; __pyx_t_213 = __pyx_v_i; __pyx_t_214 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":542 * for i in range(i_min[l - 1], i_max[l - 1]): * for j in range(j_min[m - 1], j_max[m - 1]): * grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] */ __pyx_t_215 = 0; __pyx_t_216 = __pyx_v_l; __pyx_t_217 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_215, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_216, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_217, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_209, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_210, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_211, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_212, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_213, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_214, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":544 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * */ __pyx_t_218 = 1; __pyx_t_219 = __pyx_v_i; __pyx_t_220 = __pyx_v_j; /* 
"pysteps/motion/_vet.pyx":545 * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] * * interp_coef[3, i, j]) # <<<<<<<<<<<<<< * * */ __pyx_t_221 = 3; __pyx_t_222 = __pyx_v_i; __pyx_t_223 = __pyx_v_j; /* "pysteps/motion/_vet.pyx":544 * grad_residuals[0, l, m] += (_gradient_data[0, i, j] * * interp_coef[3, i, j]) * grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<< * * interp_coef[3, i, j]) * */ __pyx_t_224 = 1; __pyx_t_225 = __pyx_v_l; __pyx_t_226 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_224, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_225, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_226, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_218, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_219, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_220, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_221, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_222, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_223, __pyx_pybuffernd_interp_coef.diminfo[2].strides))); } } } } /* "pysteps/motion/_vet.pyx":492 * * # Compute residual part of the cost function * if gradient: # <<<<<<<<<<<<<< * * morphed_image, morph_mask, _gradient_data = _warp(template_image, */ goto __pyx_L26; } /* "pysteps/motion/_vet.pyx":550 * else: * * morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ /*else*/ { __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "pysteps/motion/_vet.pyx":552 * 
morphed_image, morph_mask = _warp(template_image, * mask, * displacement, # <<<<<<<<<<<<<< * gradient=False) * morph_mask[mask > 0] = 1 */ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_template_image)); __Pyx_GIVEREF(((PyObject *)__pyx_v_template_image)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_template_image)); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mask)); __Pyx_INCREF(((PyObject *)__pyx_v_displacement)); __Pyx_GIVEREF(((PyObject *)__pyx_v_displacement)); PyTuple_SET_ITEM(__pyx_t_2, 2, ((PyObject *)__pyx_v_displacement)); /* "pysteps/motion/_vet.pyx":553 * mask, * displacement, * gradient=False) # <<<<<<<<<<<<<< * morph_mask[mask > 0] = 1 * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) */ __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_gradient, Py_False) < 0) __PYX_ERR(0, 553, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":550 * else: * * morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<< * mask, * displacement, */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 550, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if 
(likely(PyTuple_CheckExact(sequence))) { __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_4 = PyList_GET_ITEM(sequence, 0); __pyx_t_2 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_2); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_110 = Py_TYPE(__pyx_t_3)->tp_iternext; index = 0; __pyx_t_4 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_4)) goto __pyx_L57_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 1; __pyx_t_2 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_2)) goto __pyx_L57_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_3), 2) < 0) __PYX_ERR(0, 550, __pyx_L1_error) __pyx_t_110 = NULL; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L58_unpacking_done; __pyx_L57_unpacking_failed:; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_110 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 550, __pyx_L1_error) __pyx_L58_unpacking_done:; } if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error) if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error) __pyx_t_118 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __pyx_t_9 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error) } __pyx_t_118 = 0; __pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_t_119 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| 
PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0; } __pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error) } __pyx_t_119 = 0; __pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "pysteps/motion/_vet.pyx":554 * displacement, * gradient=False) * morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<< * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) * */ __pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 554, __pyx_L1_error) if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 554, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "pysteps/motion/_vet.pyx":555 * gradient=False) * morph_mask[mask > 0] = 1 * residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) # <<<<<<<<<<<<<< * * # Compute smoothness constraint part of the cost function */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_morphed_image), ((PyObject 
*)__pyx_v_input_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Power(__pyx_t_6, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_5 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_227 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_227 == ((npy_float64)-1)) && PyErr_Occurred())) __PYX_ERR(0, 555, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_residuals = __pyx_t_227; } __pyx_L26:; /* "pysteps/motion/_vet.pyx":558 * * # Compute smoothness constraint part of the cost function * cdef float64 smoothness_penalty = 0 # <<<<<<<<<<<<<< * * cdef float64 df_dx2 = 0 */ __pyx_v_smoothness_penalty = 0.0; /* "pysteps/motion/_vet.pyx":560 * cdef float64 smoothness_penalty = 0 * * cdef float64 df_dx2 = 0 # <<<<<<<<<<<<<< * cdef float64 df_dxdy = 0 * cdef float64 df_dy2 = 0 */ __pyx_v_df_dx2 = 0.0; /* "pysteps/motion/_vet.pyx":561 * * cdef float64 df_dx2 = 0 * cdef float64 df_dxdy = 0 # <<<<<<<<<<<<<< * cdef float64 df_dy2 = 0 * */ __pyx_v_df_dxdy = 0.0; /* "pysteps/motion/_vet.pyx":562 * cdef float64 df_dx2 = 0 * cdef float64 df_dxdy = 0 * cdef float64 df_dy2 = 0 # <<<<<<<<<<<<<< * * cdef float64 inloop_smoothness_penalty */ __pyx_v_df_dy2 = 0.0; /* "pysteps/motion/_vet.pyx":566 * cdef float64 inloop_smoothness_penalty * * if smooth_gain > 0.: # <<<<<<<<<<<<<< * * for axis in range(2): #, schedule='dynamic', nogil=True): */ __pyx_t_1 = ((__pyx_v_smooth_gain > 0.) 
!= 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":568 * if smooth_gain > 0.: * * for axis in range(2): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<< * * inloop_smoothness_penalty = 0 */ for (__pyx_t_25 = 0; __pyx_t_25 < 2; __pyx_t_25+=1) { __pyx_v_axis = __pyx_t_25; /* "pysteps/motion/_vet.pyx":570 * for axis in range(2): #, schedule='dynamic', nogil=True): * * inloop_smoothness_penalty = 0 # <<<<<<<<<<<<<< * * for l in range(1, x_sectors - 1): */ __pyx_v_inloop_smoothness_penalty = 0.0; /* "pysteps/motion/_vet.pyx":572 * inloop_smoothness_penalty = 0 * * for l in range(1, x_sectors - 1): # <<<<<<<<<<<<<< * * for m in range(1, y_sectors - 1): */ __pyx_t_228 = (__pyx_v_x_sectors - 1); __pyx_t_229 = __pyx_t_228; for (__pyx_t_24 = 1; __pyx_t_24 < __pyx_t_229; __pyx_t_24+=1) { __pyx_v_l = __pyx_t_24; /* "pysteps/motion/_vet.pyx":574 * for l in range(1, x_sectors - 1): * * for m in range(1, y_sectors - 1): # <<<<<<<<<<<<<< * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] */ __pyx_t_230 = (__pyx_v_y_sectors - 1); __pyx_t_231 = __pyx_t_230; for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_231; __pyx_t_23+=1) { __pyx_v_m = __pyx_t_23; /* "pysteps/motion/_vet.pyx":575 * * for m in range(1, y_sectors - 1): * df_dx2 = (sector_displacement[axis, l + 1, m] # <<<<<<<<<<<<<< * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l - 1, m]) */ __pyx_t_232 = __pyx_v_axis; __pyx_t_233 = (__pyx_v_l + 1); __pyx_t_234 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":576 * for m in range(1, y_sectors - 1): * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<< * + sector_displacement[axis, l - 1, m]) * */ __pyx_t_235 = __pyx_v_axis; __pyx_t_236 = __pyx_v_l; __pyx_t_237 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":577 * df_dx2 = (sector_displacement[axis, l + 1, m] * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l - 1, m]) # <<<<<<<<<<<<<< * * df_dx2 = df_dx2 / 
(x_sector_size * x_sector_size) */ __pyx_t_238 = __pyx_v_axis; __pyx_t_239 = (__pyx_v_l - 1); __pyx_t_240 = __pyx_v_m; __pyx_v_df_dx2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_232, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_233, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_234, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_235, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_236, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_237, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_238, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_239, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_240, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":579 * + sector_displacement[axis, l - 1, m]) * * df_dx2 = df_dx2 / (x_sector_size * x_sector_size) # <<<<<<<<<<<<<< * * df_dy2 = (sector_displacement[axis, l, m + 1] */ __pyx_v_df_dx2 = (__pyx_v_df_dx2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_x_sector_size * __pyx_v_x_sector_size))); /* "pysteps/motion/_vet.pyx":581 * df_dx2 = df_dx2 / (x_sector_size * x_sector_size) * * df_dy2 = (sector_displacement[axis, l, m + 1] # <<<<<<<<<<<<<< * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l, m - 1]) */ __pyx_t_241 = __pyx_v_axis; __pyx_t_242 = __pyx_v_l; __pyx_t_243 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":582 * * df_dy2 = (sector_displacement[axis, l, m + 1] * - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<< * + sector_displacement[axis, l, m - 1]) * */ 
__pyx_t_244 = __pyx_v_axis; __pyx_t_245 = __pyx_v_l; __pyx_t_246 = __pyx_v_m; /* "pysteps/motion/_vet.pyx":583 * df_dy2 = (sector_displacement[axis, l, m + 1] * - 2 * sector_displacement[axis, l, m] * + sector_displacement[axis, l, m - 1]) # <<<<<<<<<<<<<< * * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) */ __pyx_t_247 = __pyx_v_axis; __pyx_t_248 = __pyx_v_l; __pyx_t_249 = (__pyx_v_m - 1); __pyx_v_df_dy2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_241, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_242, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_243, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_244, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_245, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_246, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_247, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_248, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_249, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":585 * + sector_displacement[axis, l, m - 1]) * * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) # <<<<<<<<<<<<<< * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] */ __pyx_v_df_dy2 = (__pyx_v_df_dy2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_y_sector_size * __pyx_v_y_sector_size))); /* "pysteps/motion/_vet.pyx":587 * df_dy2 = df_dy2 / (y_sector_size * y_sector_size) * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] # <<<<<<<<<<<<<< * - sector_displacement[axis, l + 1, m - 1] * - 
sector_displacement[axis, l - 1, m + 1] */ __pyx_t_250 = __pyx_v_axis; __pyx_t_251 = (__pyx_v_l + 1); __pyx_t_252 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":588 * * df_dxdy = (sector_displacement[axis, l + 1, m + 1] * - sector_displacement[axis, l + 1, m - 1] # <<<<<<<<<<<<<< * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) */ __pyx_t_253 = __pyx_v_axis; __pyx_t_254 = (__pyx_v_l + 1); __pyx_t_255 = (__pyx_v_m - 1); /* "pysteps/motion/_vet.pyx":589 * df_dxdy = (sector_displacement[axis, l + 1, m + 1] * - sector_displacement[axis, l + 1, m - 1] * - sector_displacement[axis, l - 1, m + 1] # <<<<<<<<<<<<<< * + sector_displacement[axis, l - 1, m - 1]) * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) */ __pyx_t_256 = __pyx_v_axis; __pyx_t_257 = (__pyx_v_l - 1); __pyx_t_258 = (__pyx_v_m + 1); /* "pysteps/motion/_vet.pyx":590 * - sector_displacement[axis, l + 1, m - 1] * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) # <<<<<<<<<<<<<< * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * */ __pyx_t_259 = __pyx_v_axis; __pyx_t_260 = (__pyx_v_l - 1); __pyx_t_261 = (__pyx_v_m - 1); __pyx_v_df_dxdy = ((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_250, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_251, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_252, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_253, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_254, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_255, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_256, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_257, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_258, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_259, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_260, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_261, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))); /* "pysteps/motion/_vet.pyx":591 * - sector_displacement[axis, l - 1, m + 1] * + sector_displacement[axis, l - 1, m - 1]) * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) # <<<<<<<<<<<<<< * * if gradient: */ __pyx_v_df_dxdy = (__pyx_v_df_dxdy / ((__pyx_t_7pysteps_6motion_4_vet_float64)((4 * __pyx_v_x_sector_size) * __pyx_v_y_sector_size))); /* "pysteps/motion/_vet.pyx":593 * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * * if gradient: # <<<<<<<<<<<<<< * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":594 * * if gradient: * grad_smooth[axis, l, m] -= 2 * df_dx2 # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m] += df_dx2 * grad_smooth[axis, l - 1, m] += df_dx2 */ __pyx_t_262 = __pyx_v_axis; __pyx_t_263 = __pyx_v_l; __pyx_t_264 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_262, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_263, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_264, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dx2); /* "pysteps/motion/_vet.pyx":595 * if gradient: * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 # <<<<<<<<<<<<<< * grad_smooth[axis, l - 1, 
m] += df_dx2 * */ __pyx_t_265 = __pyx_v_axis; __pyx_t_266 = (__pyx_v_l + 1); __pyx_t_267 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_265, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_266, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_267, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2; /* "pysteps/motion/_vet.pyx":596 * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 * grad_smooth[axis, l - 1, m] += df_dx2 # <<<<<<<<<<<<<< * * grad_smooth[axis, l, m] -= 2 * df_dy2 */ __pyx_t_268 = __pyx_v_axis; __pyx_t_269 = (__pyx_v_l - 1); __pyx_t_270 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_268, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_269, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_270, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2; /* "pysteps/motion/_vet.pyx":598 * grad_smooth[axis, l - 1, m] += df_dx2 * * grad_smooth[axis, l, m] -= 2 * df_dy2 # <<<<<<<<<<<<<< * grad_smooth[axis, l, m - 1] += df_dy2 * grad_smooth[axis, l, m + 1] += df_dy2 */ __pyx_t_271 = __pyx_v_axis; __pyx_t_272 = __pyx_v_l; __pyx_t_273 = __pyx_v_m; *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_271, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_272, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_273, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dy2); /* "pysteps/motion/_vet.pyx":599 * * grad_smooth[axis, l, m] -= 2 * df_dy2 * grad_smooth[axis, l, m - 1] += df_dy2 # <<<<<<<<<<<<<< * grad_smooth[axis, l, m + 1] += df_dy2 * */ __pyx_t_274 = __pyx_v_axis; __pyx_t_275 = __pyx_v_l; __pyx_t_276 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, 
__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_274, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_275, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_276, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2; /* "pysteps/motion/_vet.pyx":600 * grad_smooth[axis, l, m] -= 2 * df_dy2 * grad_smooth[axis, l, m - 1] += df_dy2 * grad_smooth[axis, l, m + 1] += df_dy2 # <<<<<<<<<<<<<< * * grad_smooth[axis, l - 1, m - 1] += df_dxdy */ __pyx_t_277 = __pyx_v_axis; __pyx_t_278 = __pyx_v_l; __pyx_t_279 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_277, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_278, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_279, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2; /* "pysteps/motion/_vet.pyx":602 * grad_smooth[axis, l, m + 1] += df_dy2 * * grad_smooth[axis, l - 1, m - 1] += df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy */ __pyx_t_280 = __pyx_v_axis; __pyx_t_281 = (__pyx_v_l - 1); __pyx_t_282 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_280, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_281, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_282, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":603 * * grad_smooth[axis, l - 1, m - 1] += df_dxdy * grad_smooth[axis, l - 1, m + 1] -= df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m - 1] -= df_dxdy * grad_smooth[axis, l + 1, m + 1] += df_dxdy */ __pyx_t_283 = __pyx_v_axis; __pyx_t_284 = (__pyx_v_l - 1); __pyx_t_285 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_283, 
__pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_284, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_285, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":604 * grad_smooth[axis, l - 1, m - 1] += df_dxdy * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy # <<<<<<<<<<<<<< * grad_smooth[axis, l + 1, m + 1] += df_dxdy * */ __pyx_t_286 = __pyx_v_axis; __pyx_t_287 = (__pyx_v_l + 1); __pyx_t_288 = (__pyx_v_m - 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_286, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_287, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_288, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":605 * grad_smooth[axis, l - 1, m + 1] -= df_dxdy * grad_smooth[axis, l + 1, m - 1] -= df_dxdy * grad_smooth[axis, l + 1, m + 1] += df_dxdy # <<<<<<<<<<<<<< * * inloop_smoothness_penalty = (df_dx2 * df_dx2 */ __pyx_t_289 = __pyx_v_axis; __pyx_t_290 = (__pyx_v_l + 1); __pyx_t_291 = (__pyx_v_m + 1); *__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_289, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_290, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_291, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy; /* "pysteps/motion/_vet.pyx":593 * df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) * * if gradient: # <<<<<<<<<<<<<< * grad_smooth[axis, l, m] -= 2 * df_dx2 * grad_smooth[axis, l + 1, m] += df_dx2 */ } /* "pysteps/motion/_vet.pyx":609 * inloop_smoothness_penalty = (df_dx2 * df_dx2 * + 2 * df_dxdy * df_dxdy * + df_dy2 * df_dy2) # <<<<<<<<<<<<<< * * smoothness_penalty += inloop_smoothness_penalty */ __pyx_v_inloop_smoothness_penalty = (((__pyx_v_df_dx2 * __pyx_v_df_dx2) + ((2.0 * 
__pyx_v_df_dxdy) * __pyx_v_df_dxdy)) + (__pyx_v_df_dy2 * __pyx_v_df_dy2)); /* "pysteps/motion/_vet.pyx":611 * + df_dy2 * df_dy2) * * smoothness_penalty += inloop_smoothness_penalty # <<<<<<<<<<<<<< * * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size */ __pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty + __pyx_v_inloop_smoothness_penalty); } } } /* "pysteps/motion/_vet.pyx":613 * smoothness_penalty += inloop_smoothness_penalty * * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<< * * if gradient: */ __pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty * __pyx_v_smooth_gain); /* "pysteps/motion/_vet.pyx":566 * cdef float64 inloop_smoothness_penalty * * if smooth_gain > 0.: # <<<<<<<<<<<<<< * * for axis in range(2): #, schedule='dynamic', nogil=True): */ } /* "pysteps/motion/_vet.pyx":615 * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size * * if gradient: # <<<<<<<<<<<<<< * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * */ __pyx_t_1 = (__pyx_v_gradient != 0); if (__pyx_t_1) { /* "pysteps/motion/_vet.pyx":616 * * if gradient: * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<< * * return grad_residuals + grad_smooth */ __pyx_t_5 = PyFloat_FromDouble((2.0 * __pyx_v_smooth_gain)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyNumber_InPlaceMultiply(((PyObject *)__pyx_v_grad_smooth), __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 616, __pyx_L1_error) __pyx_t_116 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __pyx_t_9 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack); if (unlikely(__pyx_t_9 < 0)) { PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10); } __pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0; } __pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 616, __pyx_L1_error) } __pyx_t_116 = 0; __Pyx_DECREF_SET(__pyx_v_grad_smooth, ((PyArrayObject *)__pyx_t_4)); __pyx_t_4 = 0; /* "pysteps/motion/_vet.pyx":618 * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * * return grad_residuals + grad_smooth # <<<<<<<<<<<<<< * else: * return residuals, smoothness_penalty */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyNumber_Add(((PyObject *)__pyx_v_grad_residuals), ((PyObject *)__pyx_v_grad_smooth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 618, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "pysteps/motion/_vet.pyx":615 * smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size * * if gradient: # <<<<<<<<<<<<<< * grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size * */ } /* "pysteps/motion/_vet.pyx":620 * return grad_residuals + grad_smooth * else: * return residuals, smoothness_penalty # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_residuals); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyFloat_FromDouble(__pyx_v_smoothness_penalty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_109); __Pyx_XDECREF(__pyx_t_121); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_displacement); __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_x_guess); __Pyx_XDECREF((PyObject *)__pyx_v_y_guess); __Pyx_XDECREF((PyObject *)__pyx_v_interp_coef); __Pyx_XDECREF((PyObject *)__pyx_v_l_i); __Pyx_XDECREF((PyObject *)__pyx_v_m_j); __Pyx_XDECREF((PyObject *)__pyx_v_i_min); __Pyx_XDECREF((PyObject *)__pyx_v_i_max); __Pyx_XDECREF((PyObject *)__pyx_v_j_min); __Pyx_XDECREF((PyObject *)__pyx_v_j_max); __Pyx_XDECREF(__pyx_v_counts); __Pyx_XDECREF((PyObject *)__pyx_v_morphed_image); __Pyx_XDECREF((PyObject *)__pyx_v_morph_mask); __Pyx_XDECREF((PyObject *)__pyx_v__gradient_data); __Pyx_XDECREF((PyObject *)__pyx_v_grad_residuals); __Pyx_XDECREF((PyObject 
*)__pyx_v_grad_smooth);
__Pyx_XDECREF((PyObject *)__pyx_v_buffer);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* NOTE(review): This translation unit is machine-generated by Cython
 * (from pysteps/motion/_vet.pyx and numpy's __init__.pxd). Do not
 * hand-edit; change the .pyx/.pxd source and regenerate instead. */

/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258
 * # experimental exception made for __getbuffer__ and __releasebuffer__
 * # -- the details of this may change.
 * def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
 */

/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* Thin buffer-protocol (bf_getbuffer) entry point: casts the untyped
 * PyObject * self to PyArrayObject * and forwards all three arguments to
 * the typed implementation __pyx_pf_5numpy_7ndarray___getbuffer__ below. */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Typed __getbuffer__ implementation (continues past this chunk). */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_v_i;
  int __pyx_v_ndim;
  int __pyx_v_endian_detector;
  int __pyx_v_little_endian;
  int __pyx_v_t;
  char *__pyx_v_f;
  PyArray_Descr *__pyx_v_descr = 0;
  int __pyx_v_offset;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  PyArray_Descr *__pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  char *__pyx_t_9;
  /* CPython buffer API forbids a NULL view argument. */
  if (__pyx_v_info == NULL) {
    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
    return -1;
  }
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj);
  /*
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * * cdef int i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266 * cdef int i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * 
and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (unlikely(__pyx_t_1)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 272, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; 
__pyx_L7_bool_binop_done:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (unlikely(__pyx_t_1)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * # Allocate new buffer for strides and shape info. 
*/ __pyx_v_info->ndim = __pyx_v_ndim; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L9; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L9:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = <dtype>PyArray_DESCR(self) * cdef int offset */ __pyx_v_f = NULL; /* 
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * cdef int t * cdef char* f = NULL * cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_7 = PyArray_DESCR(__pyx_v_self); __pyx_t_3 = ((PyObject *)__pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * cdef int offset * * info.obj = self # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(descr): */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0); if (__pyx_t_1) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":303 * * if not PyDataType_HASFIELDS(descr): * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L15_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_L15_next_or:; /* 
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L14_bool_binop_done:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_1)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 306, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * (descr.byteorder == c'<' and not little_endian)): * raise 
ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; case NPY_UBYTE: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_v_f = ((char *)"B"); break; case NPY_SHORT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_v_f = ((char *)"h"); break; case NPY_USHORT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_v_f = ((char *)"H"); break; case NPY_INT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_v_f = ((char *)"i"); break; case NPY_UINT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":312 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_v_f = ((char *)"I"); break; case NPY_LONG: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":313 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # 
<<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_v_f = ((char *)"l"); break; case NPY_ULONG: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":314 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_v_f = ((char *)"L"); break; case NPY_LONGLONG: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":315 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_v_f = ((char *)"q"); break; case NPY_ULONGLONG: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":316 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_v_f = ((char *)"Q"); break; case NPY_FLOAT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":317 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_v_f = ((char *)"f"); break; case NPY_DOUBLE: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":318 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_v_f = ((char *)"d"); break; case NPY_LONGDOUBLE: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":319 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == 
NPY_CDOUBLE: f = "Zd" */ __pyx_v_f = ((char *)"g"); break; case NPY_CFLOAT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":320 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_v_f = ((char *)"Zf"); break; case NPY_CDOUBLE: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":321 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_v_f = ((char *)"Zd"); break; case NPY_CLONGDOUBLE: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":322 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_v_f = ((char *)"Zg"); break; case NPY_OBJECT: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":323 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_v_f = ((char *)"O"); break; default: /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":325 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 325, __pyx_L1_error) break; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":326 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":327 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":329 * return * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":330 * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":331 * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # 
<<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":332 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error) __pyx_v_f = __pyx_t_9; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":335 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. 
*/
/* --- tail of ndarray.__getbuffer__ (the function body begins earlier in this file) ---
 * NOTE(review): Cython-generated code (emitted from numpy/__init__.pxd); do not
 * hand-edit — regenerate from the .pyx/.pxd sources instead. */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* error path: clear info->obj so the caller never sees a half-initialized Py_buffer */
if (__pyx_v_info->obj != NULL) {
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
/* success path: only a placeholder Py_None in info->obj is dropped */
if (__pyx_v_info->obj == Py_None) {
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "numpy/__init__.pxd":337
 *   def __releasebuffer__(ndarray self, Py_buffer* info)
 * Frees the buffers that __getbuffer__ (above) allocated into the Py_buffer. */

/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
  /* thin wrapper: cast the PyObject* self to PyArrayObject* and delegate */
  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__releasebuffer__", 0);
  /* "numpy/__init__.pxd":338-339
   * info.format was PyObject_Malloc'ed by __getbuffer__ only for structured
   * (has-fields) dtypes, so free it only in that case. */
  __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
  if (__pyx_t_1) {
    PyObject_Free(__pyx_v_info->format);
  }
  /* "numpy/__init__.pxd":340-341
   * info.strides (with info.shape stored after it in the same malloc'ed block)
   * was allocated by __getbuffer__ only when npy_intp and Py_ssize_t differ in
   * size — mirror that condition here; a single free releases both. */
  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
  if (__pyx_t_1) {
    PyObject_Free(__pyx_v_info->strides);
  }
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/*
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* 
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":828 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, 
<void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 * return <tuple>d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); 
__Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":848 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple 
fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 851, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 852, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 * for childname in descr.names: * fields = 
descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 853, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (unlikely(__pyx_t_6)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 856, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if 
(__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_6)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 860, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":870 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, 
__pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":871 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":872 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":875 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":878 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 878, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (unlikely(__pyx_t_6)) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 880, __pyx_L1_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":883 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":884 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":885 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":886 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" 
* elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":887 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":888 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":889 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":890 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* 
"../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":891 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":892 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":893 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 
#"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":894 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":895 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":896 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":897 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 
0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":898 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":899 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; if (likely(__pyx_t_6)) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":901 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 901, __pyx_L1_error) } __pyx_L15:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":902 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":906 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = 
descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":907 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("set_array_base", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! 
# <<<<<<<<<<<<<< * PyArray_SetBaseObject(arr, base) * */ Py_INCREF(__pyx_v_base); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_v_base; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1027 * * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< * if base is NULL: * return None */ __pyx_v_base = PyArray_BASE(__pyx_v_arr); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ __pyx_t_1 = ((__pyx_v_base == NULL) != 0); if (__pyx_t_1) { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1029 * base = PyArray_BASE(arr) * 
if base is NULL: * return None # <<<<<<<<<<<<<< * return <object>base * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1030 * if base is NULL: * return None * return <object>base # <<<<<<<<<<<<<< * * # Versions of the import_* functions which are more suitable for */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_base)); __pyx_r = ((PyObject *)__pyx_v_base); goto __pyx_L0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1036 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1036, __pyx_L3_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1037 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1037, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(1, 1038, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1038, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # 
<<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1042 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1042, __pyx_L3_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1043 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1043, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1044, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); 
__Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1044, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign 
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1048 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1048, __pyx_L3_error) /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1049 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1049, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1050 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1050, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1050, __pyx_L5_except_error) } goto __pyx_L5_except_error; 
__pyx_L5_except_error:; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec__vet(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec__vet}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "_vet", __pyx_k_Cython_module_for_morphing_and, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE 
__attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_Error_computing_cost_function, __pyx_k_Error_computing_cost_function, sizeof(__pyx_k_Error_computing_cost_function), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_kp_u_The_number_of_sectors_in_x_axis, __pyx_k_The_number_of_sectors_in_x_axis, sizeof(__pyx_k_The_number_of_sectors_in_x_axis), 0, 1, 0, 0}, {&__pyx_kp_u_The_number_of_sectors_in_y_axis, __pyx_k_The_number_of_sectors_in_y_axis, sizeof(__pyx_k_The_number_of_sectors_in_y_axis), 0, 1, 0, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, {&__pyx_n_s_buffer, __pyx_k_buffer, sizeof(__pyx_k_buffer), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_cost_function, __pyx_k_cost_function, sizeof(__pyx_k_cost_function), 0, 0, 1, 1}, {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1}, {&__pyx_n_s_df_dx2, __pyx_k_df_dx2, sizeof(__pyx_k_df_dx2), 0, 0, 1, 1}, {&__pyx_n_s_df_dxdy, __pyx_k_df_dxdy, sizeof(__pyx_k_df_dxdy), 0, 0, 1, 1}, {&__pyx_n_s_df_dy2, __pyx_k_df_dy2, sizeof(__pyx_k_df_dy2), 0, 0, 1, 1}, 
{&__pyx_n_s_displacement, __pyx_k_displacement, sizeof(__pyx_k_displacement), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_f00, __pyx_k_f00, sizeof(__pyx_k_f00), 0, 0, 1, 1}, {&__pyx_n_s_f01, __pyx_k_f01, sizeof(__pyx_k_f01), 0, 0, 1, 1}, {&__pyx_n_s_f10, __pyx_k_f10, sizeof(__pyx_k_f10), 0, 0, 1, 1}, {&__pyx_n_s_f11, __pyx_k_f11, sizeof(__pyx_k_f11), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_u_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 1, 0, 1}, {&__pyx_n_s_full, __pyx_k_full, sizeof(__pyx_k_full), 0, 0, 1, 1}, {&__pyx_n_s_grad_residuals, __pyx_k_grad_residuals, sizeof(__pyx_k_grad_residuals), 0, 0, 1, 1}, {&__pyx_n_s_grad_smooth, __pyx_k_grad_smooth, sizeof(__pyx_k_grad_smooth), 0, 0, 1, 1}, {&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1}, {&__pyx_n_s_gradient_data, __pyx_k_gradient_data, sizeof(__pyx_k_gradient_data), 0, 0, 1, 1}, {&__pyx_n_s_gradient_values, __pyx_k_gradient_values, sizeof(__pyx_k_gradient_values), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_i_max, __pyx_k_i_max, sizeof(__pyx_k_i_max), 0, 0, 1, 1}, {&__pyx_n_s_i_min, __pyx_k_i_min, sizeof(__pyx_k_i_min), 0, 0, 1, 1}, {&__pyx_n_s_i_sec, __pyx_k_i_sec, sizeof(__pyx_k_i_sec), 0, 0, 1, 1}, {&__pyx_n_s_i_shift, __pyx_k_i_shift, sizeof(__pyx_k_i_shift), 0, 0, 1, 1}, {&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_inloop_smoothness_penalty, __pyx_k_inloop_smoothness_penalty, sizeof(__pyx_k_inloop_smoothness_penalty), 0, 0, 1, 1}, {&__pyx_n_s_input_image, __pyx_k_input_image, sizeof(__pyx_k_input_image), 0, 0, 1, 1}, {&__pyx_n_s_int8, __pyx_k_int8, sizeof(__pyx_k_int8), 0, 0, 1, 1}, 
{&__pyx_n_s_interp_coef, __pyx_k_interp_coef, sizeof(__pyx_k_interp_coef), 0, 0, 1, 1}, {&__pyx_n_s_intp, __pyx_k_intp, sizeof(__pyx_k_intp), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_j_max, __pyx_k_j_max, sizeof(__pyx_k_j_max), 0, 0, 1, 1}, {&__pyx_n_s_j_min, __pyx_k_j_min, sizeof(__pyx_k_j_min), 0, 0, 1, 1}, {&__pyx_n_s_j_sec, __pyx_k_j_sec, sizeof(__pyx_k_j_sec), 0, 0, 1, 1}, {&__pyx_n_s_j_shift, __pyx_k_j_shift, sizeof(__pyx_k_j_shift), 0, 0, 1, 1}, {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, {&__pyx_n_s_l0, __pyx_k_l0, sizeof(__pyx_k_l0), 0, 0, 1, 1}, {&__pyx_n_s_l1, __pyx_k_l1, sizeof(__pyx_k_l1), 0, 0, 1, 1}, {&__pyx_n_s_l_i, __pyx_k_l_i, sizeof(__pyx_k_l_i), 0, 0, 1, 1}, {&__pyx_n_s_ll, __pyx_k_ll, sizeof(__pyx_k_ll), 0, 0, 1, 1}, {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, {&__pyx_n_s_m0, __pyx_k_m0, sizeof(__pyx_k_m0), 0, 0, 1, 1}, {&__pyx_n_s_m1, __pyx_k_m1, sizeof(__pyx_k_m1), 0, 0, 1, 1}, {&__pyx_n_s_m_j, __pyx_k_m_j, sizeof(__pyx_k_m_j), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, {&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1}, {&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, {&__pyx_n_s_morph_mask, __pyx_k_morph_mask, sizeof(__pyx_k_morph_mask), 0, 0, 1, 1}, {&__pyx_n_s_morphed_image, __pyx_k_morphed_image, sizeof(__pyx_k_morphed_image), 0, 0, 1, 1}, {&__pyx_n_s_morphed_mask, __pyx_k_morphed_mask, sizeof(__pyx_k_morphed_mask), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_new_image, __pyx_k_new_image, 
sizeof(__pyx_k_new_image), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_n_s_pysteps_motion__vet, __pyx_k_pysteps_motion__vet, sizeof(__pyx_k_pysteps_motion__vet), 0, 0, 1, 1}, {&__pyx_kp_s_pysteps_motion__vet_pyx, __pyx_k_pysteps_motion__vet_pyx, sizeof(__pyx_k_pysteps_motion__vet_pyx), 0, 0, 1, 0}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1}, {&__pyx_n_s_residuals, __pyx_k_residuals, sizeof(__pyx_k_residuals), 0, 0, 1, 1}, {&__pyx_n_s_return_counts, __pyx_k_return_counts, sizeof(__pyx_k_return_counts), 0, 0, 1, 1}, {&__pyx_n_s_return_index, __pyx_k_return_index, sizeof(__pyx_k_return_index), 0, 0, 1, 1}, {&__pyx_n_s_sector_area, __pyx_k_sector_area, sizeof(__pyx_k_sector_area), 0, 0, 1, 1}, {&__pyx_n_s_sector_displacement, __pyx_k_sector_displacement, sizeof(__pyx_k_sector_displacement), 0, 0, 1, 1}, {&__pyx_n_s_smooth_gain, __pyx_k_smooth_gain, sizeof(__pyx_k_smooth_gain), 0, 0, 1, 1}, {&__pyx_n_s_smoothness_penalty, __pyx_k_smoothness_penalty, sizeof(__pyx_k_smoothness_penalty), 0, 0, 1, 1}, {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, {&__pyx_n_s_template_image, __pyx_k_template_image, sizeof(__pyx_k_template_image), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_unique, __pyx_k_unique, sizeof(__pyx_k_unique), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, 
__pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_warp, __pyx_k_warp, sizeof(__pyx_k_warp), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_x_ceil, __pyx_k_x_ceil, sizeof(__pyx_k_x_ceil), 0, 0, 1, 1}, {&__pyx_n_s_x_float, __pyx_k_x_float, sizeof(__pyx_k_x_float), 0, 0, 1, 1}, {&__pyx_n_s_x_floor, __pyx_k_x_floor, sizeof(__pyx_k_x_floor), 0, 0, 1, 1}, {&__pyx_n_s_x_guess, __pyx_k_x_guess, sizeof(__pyx_k_x_guess), 0, 0, 1, 1}, {&__pyx_n_s_x_image_size, __pyx_k_x_image_size, sizeof(__pyx_k_x_image_size), 0, 0, 1, 1}, {&__pyx_n_s_x_max_float, __pyx_k_x_max_float, sizeof(__pyx_k_x_max_float), 0, 0, 1, 1}, {&__pyx_n_s_x_max_int, __pyx_k_x_max_int, sizeof(__pyx_k_x_max_int), 0, 0, 1, 1}, {&__pyx_n_s_x_sector_size, __pyx_k_x_sector_size, sizeof(__pyx_k_x_sector_size), 0, 0, 1, 1}, {&__pyx_n_s_x_sectors, __pyx_k_x_sectors, sizeof(__pyx_k_x_sectors), 0, 0, 1, 1}, {&__pyx_n_s_xy, __pyx_k_xy, sizeof(__pyx_k_xy), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_y_ceil, __pyx_k_y_ceil, sizeof(__pyx_k_y_ceil), 0, 0, 1, 1}, {&__pyx_n_s_y_float, __pyx_k_y_float, sizeof(__pyx_k_y_float), 0, 0, 1, 1}, {&__pyx_n_s_y_floor, __pyx_k_y_floor, sizeof(__pyx_k_y_floor), 0, 0, 1, 1}, {&__pyx_n_s_y_guess, __pyx_k_y_guess, sizeof(__pyx_k_y_guess), 0, 0, 1, 1}, {&__pyx_n_s_y_image_size, __pyx_k_y_image_size, sizeof(__pyx_k_y_image_size), 0, 0, 1, 1}, {&__pyx_n_s_y_max_float, __pyx_k_y_max_float, sizeof(__pyx_k_y_max_float), 0, 0, 1, 1}, {&__pyx_n_s_y_max_int, __pyx_k_y_max_int, sizeof(__pyx_k_y_max_int), 0, 0, 1, 1}, {&__pyx_n_s_y_sector_size, __pyx_k_y_sector_size, sizeof(__pyx_k_y_sector_size), 0, 0, 1, 1}, {&__pyx_n_s_y_sectors, __pyx_k_y_sectors, sizeof(__pyx_k_y_sectors), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static 
CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 163, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 357, __pyx_L1_error) __pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) __PYX_ERR(0, 464, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 856, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1038, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "pysteps/motion/_vet.pyx":357 * * if x_image_size % x_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in x axis (axis=0)" * + " don't divide the image size") */ __pyx_tuple_ = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_x_axis); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 357, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "pysteps/motion/_vet.pyx":362 * * if y_image_size % y_sectors != 0: * raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<< * "The number of sectors in y axis (axis=1) don't" * + " divide the image size") */ __pyx_tuple__2 = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_y_axis); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "pysteps/motion/_vet.pyx":504 * buffer[morph_mask == 1] = 0 * * _gradient_data[0, :] *= buffer # <<<<<<<<<<<<<< * _gradient_data[1, :] *= buffer * */ 
__pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); __pyx_tuple__4 = PyTuple_Pack(2, __pyx_int_0, __pyx_slice__3); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "pysteps/motion/_vet.pyx":505 * * _gradient_data[0, :] *= buffer * _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<< * * for l in range(x_sectors): # schedule='dynamic', nogil=True): */ __pyx_tuple__5 = PyTuple_Pack(2, __pyx_int_1, __pyx_slice__3); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 505, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == 
c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to 
import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< * np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ __pyx_tuple__13 = PyTuple_Pack(27, __pyx_n_s_image, __pyx_n_s_mask, __pyx_n_s_displacement, __pyx_n_s_gradient, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_new_image, __pyx_n_s_morphed_mask, __pyx_n_s_gradient_values, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_max_int, __pyx_n_s_y_max_int, __pyx_n_s_x_max_float, __pyx_n_s_y_max_float, __pyx_n_s_x_float, __pyx_n_s_y_float, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_x_floor, __pyx_n_s_x_ceil, __pyx_n_s_y_floor, __pyx_n_s_y_ceil, __pyx_n_s_f00, __pyx_n_s_f10, __pyx_n_s_f01, __pyx_n_s_f11); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(4, 0, 27, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_warp, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 67, __pyx_L1_error) /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ __pyx_tuple__15 = PyTuple_Pack(54, __pyx_n_s_sector_displacement, __pyx_n_s_template_image, __pyx_n_s_input_image, __pyx_n_s_mask, __pyx_n_s_smooth_gain, __pyx_n_s_gradient, __pyx_n_s_x_sectors, __pyx_n_s_y_sectors, 
__pyx_n_s_x_image_size, __pyx_n_s_y_image_size, __pyx_n_s_x_sector_size, __pyx_n_s_y_sector_size, __pyx_n_s_displacement, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_xy, __pyx_n_s_l, __pyx_n_s_m, __pyx_n_s_ll, __pyx_n_s_mm, __pyx_n_s_i_sec, __pyx_n_s_j_sec, __pyx_n_s_l0, __pyx_n_s_m0, __pyx_n_s_l1, __pyx_n_s_m1, __pyx_n_s_i_shift, __pyx_n_s_j_shift, __pyx_n_s_axis, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_guess, __pyx_n_s_y_guess, __pyx_n_s_sector_area, __pyx_n_s_interp_coef, __pyx_n_s_l_i, __pyx_n_s_m_j, __pyx_n_s_i_min, __pyx_n_s_i_max, __pyx_n_s_j_min, __pyx_n_s_j_max, __pyx_n_s_counts, __pyx_n_s_morphed_image, __pyx_n_s_morph_mask, __pyx_n_s_gradient_data, __pyx_n_s_grad_residuals, __pyx_n_s_grad_smooth, __pyx_n_s_buffer, __pyx_n_s_residuals, __pyx_n_s_smoothness_penalty, __pyx_n_s_df_dx2, __pyx_n_s_df_dxdy, __pyx_n_s_df_dy2, __pyx_n_s_inloop_smoothness_penalty); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(6, 0, 54, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_cost_function, 240, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, 
__pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 918, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC init_vet(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC init_vet(void) #else __Pyx_PyMODINIT_FUNC PyInit__vet(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit__vet(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec__vet(PyObject *__pyx_pyinit_module) 
#endif #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module '_vet' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__vet(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD 
/* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("_vet", __pyx_methods, __pyx_k_Cython_module_for_morphing_and, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_pysteps__motion___vet) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "pysteps.motion._vet")) { if (unlikely(PyDict_SetItemString(modules, "pysteps.motion._vet", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); (void)__Pyx_modinit_type_init_code(); if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "pysteps/motion/_vet.pyx":8 * """ * from cython.parallel import prange, parallel * import numpy as np # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":67 * @cython.nonecheck(False) * @cython.cdivision(True) * def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<< 
* np.ndarray[int8, ndim=2] mask, * np.ndarray[float64, ndim=3] displacement, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_1_warp, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_warp, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":240 * @cython.nonecheck(False) * @cython.cdivision(True) * def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<< * np.ndarray[float64, ndim=2] template_image, * np.ndarray[float64, ndim=2] input_image, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_3_cost_function, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cost_function, __pyx_t_1) < 0) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "pysteps/motion/_vet.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init pysteps.motion._vet", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init pysteps.motion._vet"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if 
CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; 
ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((size_t)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? 
__PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject 
*kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or 
None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if 
(!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* 
_PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject 
*args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif

/* RaiseTooManyValuesToUnpack */
/* Set a ValueError matching CPython's message for an over-long iterable
   unpacking ("too many values to unpack (expected N)"). */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}

/* RaiseNeedMoreValuesToUnpack */
/* Set a ValueError for a too-short iterable; the "%.1s" consumes either ""
   or "s" so the word "value" is pluralised exactly like CPython's message. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}

/* IterFinish */
/* Called after an iterator signalled exhaustion: returns 0 when there is no
   pending exception or the pending exception is a StopIteration (which is
   cleared), and -1 when any other exception is pending. */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
            /* Clear the StopIteration directly on the thread state instead of
               going through PyErr_Clear(). */
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    /* Portable fallback using the public error API. */
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}

/* UnpackItemEndCheck */
/* After unpacking `expected` items the iterator must be exhausted: a further
   item (retval != NULL) means "too many values" (the extra item is released
   and a ValueError is set); otherwise __Pyx_IterFinish() distinguishes clean
   exhaustion (0) from a real pending error (-1). */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    } else {
        return __Pyx_IterFinish();
    }
    return 0;  /* unreachable; retained verbatim from the generated code */
}

/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* Optimised (PyObject - constant C long) subtraction with fast paths for
   exact int/long/float operands; falls back to the generic number protocol
   for anything else (continues past this chunk). */
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
    (void)inplace;
    (void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
        /* Subtract in unsigned arithmetic to avoid signed-overflow UB; the
           sign-bit test that follows detects overflow after the fact. */
        x = (long)((unsigned long)a
- b); if (likely((x^a) >= 0 || (x^~b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } x = a - b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla - llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("subtract", return NULL) result = ((double)a) - (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2);
}
#endif

/* GetItemInt */
/* Generic o[j] lookup used as the slow path below.  Takes over (steals) the
   reference to the freshly created index object j, and tolerates j == NULL
   from a failed index-object construction by returning NULL. */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}

/* list[i] fast path: direct PyList_GET_ITEM when the borrow/macros are safe;
   wraparound and bounds checking are compile-time flags folded in here. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                          CYTHON_NCP_UNUSED int wraparound,
                                                          CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    /* NOTE: bitwise '&' is intentional — both operands are 0/1 truth values,
       so this is equivalent to '&&' without an extra branch. */
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);  /* GET_ITEM returns a borrowed reference */
        return r;
    }
    /* Out of bounds: defer to the generic protocol so the exception matches
       CPython's (reference to the boxed index is consumed by the callee). */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}

/* tuple[i] fast path; mirrors the list variant above. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           CYTHON_NCP_UNUSED int wraparound,
                                                           CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}

/* Dispatcher for integer indexing: exact-list / exact-tuple fast paths,
   then the type's sq_item slot, then the generic protocol (continues past
   this chunk). */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        /* bitwise '|' on 0/1 flags — same trick as the '&' above */
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ?
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { if (unlikely(PyTuple_Check(key))) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) { PyErr_SetObject(PyExc_KeyError, 
args);
                Py_DECREF(args);
            }
        } else {
            PyErr_SetObject(PyExc_KeyError, key);
        }
    }
    return NULL;
}
Py_INCREF(value);
return value;
}
#endif

/* RaiseNoneIterError */
/* Set the TypeError CPython raises when None is iterated/unpacked. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}

/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the exc_info stack past entries whose exc_type is NULL/None (already
   cleared) to the innermost frame that still holds a live "handled"
   exception.  Returns a borrowed pointer into the thread state. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif

/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Snapshot the currently handled (sys.exc_info-style) exception into
   *type/*value/*tb, returning new references; XINCREF because any of the
   slots may legitimately be NULL. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
#else
    /* Pre-3.7 layout: the fields live directly on the thread state. */
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
#endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}

/* Install type/value/tb as the handled exception.  The stores are done
   without INCREF, i.e. this takes over the caller's references, and the
   previously stored objects are released afterwards (swap-then-decref so a
   destructor running during DECREF never sees a half-updated state). */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
#else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
#endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif

/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Tuple variant of exception matching; the body continues in the next
   chunk of this generated file. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback 
= local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) 
{ PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != 
code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) 
{ #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) { const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(Py_intptr_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(Py_intptr_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned 
char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return 
::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = (float)(1.0) / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = (float)(1.0) / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0.0, -1.0); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * 
b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = (double)(1.0) / (b.real + b.imag * r); 
return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = (double)(1.0) / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, 
a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0.0, -1.0); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum 
NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) { const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(Py_intptr_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (Py_intptr_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (Py_intptr_t) 0; case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, digits[0]) case 2: if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 2 * PyLong_SHIFT) { return (Py_intptr_t) (((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break; case 3: if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 3 * PyLong_SHIFT) { return (Py_intptr_t) (((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break; case 4: if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) >= 4 * PyLong_SHIFT) { return (Py_intptr_t) (((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (Py_intptr_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (Py_intptr_t) 0; case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, +digits[0]) case -2: if (8 * sizeof(Py_intptr_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { return (Py_intptr_t) (((Py_intptr_t)-1)*(((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; case 2: if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { return (Py_intptr_t) ((((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; case -3: if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; case 3: if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { return (Py_intptr_t) ((((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; case -4: if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) { 
return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; case 4: if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) { return (Py_intptr_t) ((((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]))); } } break; } #endif if (sizeof(Py_intptr_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else Py_intptr_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (Py_intptr_t) -1; } } else { Py_intptr_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (Py_intptr_t) -1; val = __Pyx_PyInt_As_Py_intptr_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too 
large to convert to Py_intptr_t"); return (Py_intptr_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to Py_intptr_t"); return (Py_intptr_t) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | 
(unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, 
PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * 
sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 
4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to 
long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { 
char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if 
(unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
c-omp.c
/* This file contains routines to construct GNU OpenMP constructs, 
   called from parsing in the C and C++ front ends. 
   Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. 
   Contributed by Richard Henderson <rth@redhat.com>, 
		  Diego Novillo <dnovillo@redhat.com>. 

This file is part of GCC. 

GCC is free software; you can redistribute it and/or modify it under 
the terms of the GNU General Public License as published by the Free 
Software Foundation; either version 3, or (at your option) any later 
version. 

GCC is distributed in the hope that it will be useful, but WITHOUT ANY 
WARRANTY; without even the implied warranty of MERCHANTABILITY or 
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License 
for more details. 

You should have received a copy of the GNU General Public License 
along with GCC; see the file COPYING3.  If not see 
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "gimple.h"
#include "bitmap.h"
#include "langhooks.h"


/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.
   (NOTE(review): the original comment here was truncated mid-sentence;
   restored to match the sibling builders below.)  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  Emitted as a call to the GOMP runtime.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_BARRIER];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  Emitted as a call to the GOMP runtime.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_TASKWAIT];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  The expression to be
   implemented atomically is LHS code= RHS.  LOC is the location of the
   atomic statement.  The value returned is either error_mark_node (if
   the construct was erroneous) or an OMP_ATOMIC node which should be
   added to the current statement tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (input_location, lhs, NULL_TREE, code,
			 input_location, rhs, NULL_TREE);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  x = build2 (OMP_ATOMIC, void_type_node, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  return x;
}

/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_SYNCHRONIZE];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.
*/

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* The increment must be computed in an integral type at least as wide
     as DECL's type; otherwise reject it.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes zero to the canonicalized increment.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Recurse through conversions, re-applying the cast on success.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of the addition.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}

/* Validate and emit code for the OpenMP directive #pragma omp for.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.
*/ tree c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv, tree incrv, tree body, tree pre_body) { location_t elocus; bool fail = false; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); /* Validate the iteration variable. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) { error_at (elocus, "invalid type for iteration variable %qE", decl); fail = true; } /* In the case of "for (int i = 0...)", init will be a decl. It should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION (decl); init = DECL_INITIAL (decl); if (init == NULL) { error_at (elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, /* FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } gcc_assert (TREE_CODE (init) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (init, 0) == decl); if (cond == NULL_TREE) { error_at (elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; if (EXPR_HAS_LOCATION (cond)) elocus = EXPR_LOCATION (cond); if (TREE_CODE (cond) == LT_EXPR || TREE_CODE (cond) == LE_EXPR || TREE_CODE (cond) == GT_EXPR || TREE_CODE (cond) == GE_EXPR || TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* 2.5.1. The comparison in the condition is computed in the type of DECL, otherwise the behavior is undefined. 
For example: long n; int i; i < n; according to ISO will be evaluated as: (long)i < n; We want to force: i < (int)n; */ if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0)) { TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); TREE_OPERAND (cond, 1) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 1)); } else if (TREE_CODE (op1) == NOP_EXPR && decl == TREE_OPERAND (op1, 0)) { TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); TREE_OPERAND (cond, 0) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 0)); } if (decl == TREE_OPERAND (cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND (cond, 1)) { TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond))); TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); TREE_OPERAND (cond, 0) = decl; cond_ok = true; } if (TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) cond_ok = false; else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MIN_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MAX_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? LT_EXPR : GE_EXPR); else cond_ok = false; } } if (!cond_ok) { error_at (elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at (elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION (incr)) elocus = EXPR_LOCATION (incr); /* Check all the valid increment expressions: v++, v--, ++v, --v, v = v + incr, v = incr + v and v = v - incr. 
*/ switch (TREE_CODE (incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; incr_ok = true; if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_OPERAND (incr, 1)) { tree t = fold_convert_loc (elocus, sizetype, TREE_OPERAND (incr, 1)); if (TREE_CODE (incr) == POSTDECREMENT_EXPR || TREE_CODE (incr) == PREDECREMENT_EXPR) t = fold_build1_loc (elocus, NEGATE_EXPR, sizetype, t); t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } break; case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; if (TREE_OPERAND (incr, 1) == decl) break; if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr (elocus, TREE_OPERAND (incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } } break; default: break; } if (!incr_ok) { error_at (elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node (OMP_FOR); TREE_TYPE (t) = void_type_node; OMP_FOR_INIT (t) = initv; OMP_FOR_COND (t) = condv; OMP_FOR_INCR (t) = incrv; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; SET_EXPR_LOCATION (t, locus); return add_stmt (t); } } /* Divide CLAUSES into two lists: those that apply to a parallel construct, and those that apply to a work-sharing construct. Place the results in *PAR_CLAUSES and *WS_CLAUSES respectively. In addition, add a nowait clause to the work-sharing list. 
LOC is the location of the OMP_PARALLEL*.  */

void
c_split_parallel_clauses (location_t loc, tree clauses,
			  tree *par_clauses, tree *ws_clauses)
{
  tree c = clauses;

  /* Start with an empty parallel list and a work-sharing list seeded
     with a nowait clause.  */
  *par_clauses = NULL;
  *ws_clauses = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);

  while (c != NULL_TREE)
    {
      /* Detach C from the input chain before re-linking it.  */
      tree rest = OMP_CLAUSE_CHAIN (c);
      tree *dest;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_DEFAULT:
	  dest = par_clauses;
	  break;

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	  dest = ws_clauses;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Prepend C onto the chosen output list.  */
      OMP_CLAUSE_CHAIN (c) = *dest;
      *dest = c;
      c = rest;
    }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member are
     predetermined shared.  */
  return (TREE_READONLY (decl)
	  ? OMP_CLAUSE_DEFAULT_SHARED
	  : OMP_CLAUSE_DEFAULT_UNSPECIFIED);
}
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickCore Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/geometry.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* Global declarations. */ static MonitorHandler monitor_handler = (MonitorHandler) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e C a c h e V i e w I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireCacheViewIndexes() returns the indexes associated with the specified % view. 
%
%  Deprecated, replace with:
%
%    GetCacheViewVirtualIndexQueue(cache_view);
%
%  The format of the AcquireCacheViewIndexes method is:
%
%      const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view)
%
%  A description of each parameter follows:
%
%    o cache_view: the cache view.
%
*/
MagickExport const IndexPacket *AcquireCacheViewIndexes(
  const CacheView *cache_view)
{
  const IndexPacket
    *indexes;

  /*
    Deprecated wrapper: forward to the modern accessor.
  */
  indexes=GetCacheViewVirtualIndexQueue(cache_view);
  return(indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e C a c h e V i e w P i x e l s                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.  A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  Deprecated, replace with:
%
%    GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
%
%  The format of the AcquireCacheViewPixels method is:
%
%      const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_view: the cache view.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireCacheViewPixels(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  const PixelPacket
    *pixels;

  /*
    Deprecated wrapper: forward to GetCacheViewVirtualPixels().
  */
  pixels=GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e I m a g e P i x e l s                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImagePixels() returns an immutable pixel region.  If the region is
%  successfully accessed, a pointer to it is returned, otherwise NULL is
%  returned.  The returned pointer may point to a temporary working copy of
%  the pixels or it may point to the original pixels in memory.  Performance
%  is maximized if the selected region is part of one row, or one or more
%  full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in RAM, or in a memory-mapped file.  The
%  returned pointer should *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking
%  GetAuthenticPixels() to access the black color component or to obtain the
%  colormap indexes (of type IndexPacket) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels()
%  or GetCacheViewAuthenticPixels() instead.
%
%  Deprecated, replace with:
%
%    GetVirtualPixels(image,x,y,columns,rows,exception);
%
%  The format of the AcquireImagePixels() method is:
%
%      const PixelPacket *AcquireImagePixels(const Image *image,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireImagePixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  const PixelPacket
    *pixels;

  /*
    Deprecated wrapper: forward to GetVirtualPixels().
  */
  pixels=GetVirtualPixels(image,x,y,columns,rows,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e I n d e x e s                                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireIndexes() returns the black channel or the colormap indexes
%  associated with the last call to QueueAuthenticPixels() or
%  GetVirtualPixels().  NULL is returned if the black channel or colormap
%  indexes are not available.
%
%  Deprecated, replace with:
%
%    GetVirtualIndexQueue(image);
%
%  The format of the AcquireIndexes() method is:
%
%      const IndexPacket *AcquireIndexes(const Image *image)
%
%  A description of each parameter follows:
%
%    o indexes: AcquireIndexes() returns the indexes associated with the
%      last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%    o image: the image.
% */ MagickExport const IndexPacket *AcquireIndexes(const Image *image) { return(GetVirtualIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMemory() returns a pointer to a block of memory at least size bytes % suitably aligned for any use. % % The format of the AcquireMemory method is: % % void *AcquireMemory(const size_t size) % % A description of each parameter follows: % % o size: the size of the memory in bytes to allocate. % */ MagickExport void *AcquireMemory(const size_t size) { void *allocation; assert(size != 0); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); allocation=malloc(size); return(allocation); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e C a c h e V i e w P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead. % % Deprecated, replace with: % % GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception); % % The format of the AcquireOneCacheViewPixel method is: % % MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view, % const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y: These values define the offset of the pixel. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewPixel(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to GetOneCacheViewVirtualPixel().
  */
  status=GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified
%  (x,y) location.  The image background color is returned if an error
%  occurs.  If you plan to modify the pixel, use
%  GetOneCacheViewAuthenticPixel() instead.
%
%  Deprecated, replace with:
%
%    GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
%      x,y,pixel,exception);
%
%  The format of the AcquireOneCacheViewVirtualPixel method is:
%
%      MagickBooleanType AcquireOneCacheViewVirtualPixel(
%        const CacheView *cache_view,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_view: the cache view.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the offset of the pixel.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel( const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { MagickBooleanType status; status=GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method, x,y,pixel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e M a g i c k P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneMagickPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOnePixel() instead. % % Deprecated, replace with: % % MagickPixelPacket pixel; % GetOneVirtualMagickPixel(image,x,y,&pixel,exception); % % The format of the AcquireOneMagickPixel() method is: % % MagickPixelPacket AcquireOneMagickPixel(const Image image,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image, const ssize_t x,const ssize_t y,ExceptionInfo *exception) { MagickPixelPacket pixel; (void) GetOneVirtualMagickPixel(image,x,y,&pixel,exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOnePixel() returns a single pixel at the specified (x,y) location. % The image background color is returned if an error occurs. If you plan to % modify the pixel, use GetOnePixel() instead. 
%
%  Deprecated, replace with:
%
%    PixelPacket pixel;
%    GetOneVirtualPixel(image,x,y,&pixel,exception);
%
%  The format of the AcquireOnePixel() method is:
%
%      PixelPacket AcquireOnePixel(const Image image,const ssize_t x,
%        const ssize_t y,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
  const ssize_t y,ExceptionInfo *exception)
{
  PixelPacket
    one_pixel;

  /*
    Deprecated wrapper: pixel is returned by value; errors are reported via
    the exception argument.
  */
  (void) GetOneVirtualPixel(image,x,y,&one_pixel,exception);
  return(one_pixel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e O n e V i r t u a l P i x e l                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireOneVirtualPixel() returns a single pixel at the specified (x,y)
%  location as defined by specified pixel method.  The image background
%  color is returned if an error occurs.  If you plan to modify the pixel,
%  use GetOnePixel() instead.
%
%  Deprecated, replace with:
%
%    PixelPacket pixel;
%    GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,
%      exception);
%
%  The format of the AcquireOneVirtualPixel() method is:
%
%      PixelPacket AcquireOneVirtualPixel(const Image image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, ExceptionInfo *exception) { PixelPacket pixel; (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel, exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixels() returns the pixels associated with the last call to % QueueAuthenticPixels() or GetVirtualPixels(). % % Deprecated, replace with: % % GetVirtualPixelQueue(image); % % The format of the AcquirePixels() method is: % % const PixelPacket *AcquirePixels(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const PixelPacket *AcquirePixels(const Image *image) { return(GetVirtualPixelQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffinityImage() replaces the colors of an image with the closest color from % a reference image. % % Deprecated, replace with: % % RemapImage(quantize_info,image,affinity_image); % % The format of the AffinityImage method is: % % MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info, % Image *image,const Image *affinity_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o affinity_image: the reference image. 
%
*/
MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to RemapImage().
  */
  status=RemapImage(quantize_info,image,affinity_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A f f i n i t y I m a g e s                                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffinityImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  Deprecated, replace with:
%
%    RemapImages(quantize_info,images,affinity_image);
%
%  The format of the AffinityImages method is:
%
%      MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *affinity_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImages(
  const QuantizeInfo *quantize_info,Image *images,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to RemapImages().
  */
  status=RemapImages(quantize_info,images,affinity_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A l l o c a t e I m a g e                                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AllocateImage() returns a pointer to an image structure initialized to
%  default values.
%
%  Deprecated, replace with:
%
%    AcquireImage(image_info);
%
%  The format of the AllocateImage method is:
%
%      Image *AllocateImage(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background
%      color, and others.
% */ MagickExport Image *AllocateImage(const ImageInfo *image_info) { return(AcquireImage(image_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AllocateImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % Deprecated, replace with: % % AcquireImageColormap(image,colors); % % The format of the AllocateImageColormap method is: % % MagickBooleanType AllocateImageColormap(Image *image, % const size_t colors) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. % */ MagickExport MagickBooleanType AllocateImageColormap(Image *image, const size_t colors) { return(AcquireImageColormap(image,colors)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % Deprecated, replace with: % % AcquireNextImage(image_info,image); % % The format of the AllocateNextImage method is: % % void AllocateNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. 
%
*/
MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image)
{
  /* Deprecated: forwards to AcquireNextImage(). */
  AcquireNextImage(image_info,image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A l l o c a t e S t r i n g                                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AllocateString() allocates memory for a string and copies the source
%  string to that memory location (and returns it).  The caller owns the
%  returned allocation.  On allocation failure a fatal exception is thrown
%  (this method does not return NULL).
%
%  The format of the AllocateString method is:
%
%      char *AllocateString(const char *source)
%
%  A description of each parameter follows:
%
%    o source: A character string.  Must not be NULL (asserted).
%
*/
MagickExport char *AllocateString(const char *source)
{
  char
    *destination;

  size_t
    length;

  assert(source != (const char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /*
    NOTE(review): the buffer is deliberately over-allocated by MaxTextExtent
    bytes beyond the source length — presumably so legacy callers can append
    in place; confirm before shrinking.
  */
  length=strlen(source)+MaxTextExtent+1;
  destination=(char *) AcquireQuantumMemory(length,sizeof(*destination));
  if (destination == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *destination='\0';
  /*
    NOTE(review): this NULL re-check is redundant with the assert above in
    debug builds, but it is the only guard when NDEBUG is defined.
  */
  if (source != (char *) NULL)
    (void) CopyMagickString(destination,source,length);
  return(destination);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A v e r a g e I m a g e s                                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AverageImages() takes a set of images and averages them together.  Each
%  image in the set must have the same width and height.  AverageImages()
%  returns a single image with each corresponding pixel component of each
%  image averaged.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
% % Deprecated, replace with: % % EvaluateImages(images,MeanEvaluateOperator,exception); % % The format of the AverageImages method is: % % Image *AverageImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MeanEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Extract a channel from the image. A channel is a particular color component % of each pixel in the image. % % Deprecated, replace with: % % SeparateImageChannel(image,channel); % % The format of the ChannelImage method is: % % unsigned int ChannelImage(Image *image,const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel, % or BlackChannel. % */ MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel) { return(SeparateImageChannel(image,channel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelThresholdImage() changes the value of individual pixels based on % the intensity of each pixel channel. The result is a high-contrast image. % % The format of the ChannelThresholdImage method is: % % unsigned int ChannelThresholdImage(Image *image,const char *level) % % A description of each parameter follows: % % o image: the image. 
%
%    o level: define the threshold values.
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
  MagickPixelPacket
    threshold;

  GeometryInfo
    geometry_info;

  unsigned int
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (level == (char *) NULL)
    return(MagickFalse);
  /*
    Parse "red[xgreen[xblue]]" thresholds from the level geometry string;
    unspecified channels default to the red threshold.
  */
  flags=ParseGeometry(level,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  /*
    Threshold each channel independently; the per-channel statuses are OR'ed
    so any channel success yields a non-zero return.
  */
  status=BilevelImageChannel(image,RedChannel,threshold.red);
  status|=BilevelImageChannel(image,GreenChannel,threshold.green);
  status|=BilevelImageChannel(image,BlueChannel,threshold.blue);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   C l i p I m a g e P a t h                                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPathImage() sets the image clip mask based any clipping path
%  information if it exists.
%
%  Deprecated, replace with:
%
%    ClipImagePath(image,pathname,inside);
%
%  The format of the ClipImage method is:
%
%      MagickBooleanType ClipPathImage(Image *image,const char *pathname,
%        const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o pathname: name of clipping path resource.  If name is preceded by #,
%      use clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping
%      path.  Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,
  const char *pathname,const MagickBooleanType inside)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to ClipImagePath().
  */
  status=ClipImagePath(image,pathname,inside);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   C l o n e I m a g e A t t r i b u t e s                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageAttributes() clones one or more image attributes.
%
%  Deprecated, replace with:
%
%    CloneImageProperties(image,clone_image);
%
%  The format of the CloneImageAttributes method is:
%
%      MagickBooleanType CloneImageAttributes(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
  const Image *clone_image)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to CloneImageProperties().
  */
  status=CloneImageProperties(image,clone_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   C l o n e M e m o r y                                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneMemory() copies size bytes from memory area source to the
%  destination.  Copying between objects that overlap will take place
%  correctly.  It returns destination.
%
%  The format of the CloneMemory method is:
%
%      void *CloneMemory(void *destination,const void *source,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o destination: the destination.
%
%    o source: the source.
%
%    o size: the size of the memory in bytes to allocate.
% */ MagickExport void *CloneMemory(void *destination,const void *source, const size_t size) { register const unsigned char *p; register unsigned char *q; register ssize_t i; assert(destination != (void *) NULL); assert(source != (const void *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); p=(const unsigned char *) source; q=(unsigned char *) destination; if ((p <= q) || ((p+size) >= q)) return(CopyMagickMemory(destination,source,size)); /* Overlap, copy backwards. */ p+=size; q+=size; for (i=(ssize_t) (size-1); i >= 0; i--) *--q=(*--p); return(destination); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o s e C a c h e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloseCacheView() closes the specified view returned by a previous call to % OpenCacheView(). % % Deprecated, replace with: % % DestroyCacheView(view_info); % % The format of the CloseCacheView method is: % % CacheView *CloseCacheView(CacheView *view_info) % % A description of each parameter follows: % % o view_info: the address of a structure of type CacheView. % */ MagickExport CacheView *CloseCacheView(CacheView *view_info) { return(DestroyCacheView(view_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorFloodfill() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. 
The
%  fuzz member of image defines how much tolerance is acceptable to
%  consider two colors as the same.  For example, set fuzz to 10 and the
%  color red at intensities of 100 and 102 respectively are now
%  interpreted as the same color for the purposes of the floodfill.
%
%  The format of the ColorFloodfillImage method is:
%
%      MagickBooleanType ColorFloodfillImage(Image *image,
%        const DrawInfo *draw_info,const PixelPacket target,
%        const ssize_t x_offset,const ssize_t y_offset,
%        const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x,y: the starting location of the operation.
%
%    o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/

/* Maximum number of scanline segments the flood-fill stack can hold. */
#define MaxStacksize  (1UL << 15)
/*
  Push a horizontal segment onto the stack, but only when the row it will
  seed ((up)+(delta)) lies inside the image.  NOTE: relies on locals `s`,
  `segment_stack` and `image` at the expansion site.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
  const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    The flood region is marked on a clone; opacity of a floodplane pixel is
    used as the "visited" flag (TransparentOpacity == filled).
  */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    &image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    /* Scan left from x1, marking matching, unvisited pixels. */
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    /* Scan right, seeding child segments above/below as runs are found. */
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* Skip over non-matching pixels up to the segment's right edge. */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,
            q,(MagickRealType) q->opacity,q);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  /* Success only if the final composite loop completed every row. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D e l e t e I m a g e A t t r i b u t e                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageAttribute() deletes an attribute from the image.
%
%  Deprecated, replace with:
%
%    DeleteImageProperty(image,key);
%
%  The format of the DeleteImageAttribute method is:
%
%      MagickBooleanType DeleteImageAttribute(Image *image,const char *key)
%
%  A description of each parameter follows:
%
%    o image: the image info.
%
%    o key: the image key.
% */ MagickExport MagickBooleanType DeleteImageAttribute(Image *image, const char *key) { return(DeleteImageProperty(image,key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageList() deletes an image at the specified position in the list. % % The format of the DeleteImageList method is: % % unsigned int DeleteImageList(Image *images,const ssize_t offset) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. % */ MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset) { register ssize_t i; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); while (GetPreviousImageInList(images) != (Image *) NULL) images=GetPreviousImageInList(images); for (i=0; i < offset; i++) { if (GetNextImageInList(images) == (Image *) NULL) return(MagickFalse); images=GetNextImageInList(images); } DeleteImageFromList(&images); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteMagickRegistry() deletes an entry in the registry as defined by the id. % It returns MagickTrue if the entry is deleted otherwise MagickFalse if no % entry is found in the registry that matches the id. % % Deprecated, replace with: % % char key[MaxTextExtent]; % FormatLocaleString(key,MaxTextExtent,"%ld\n",id); % DeleteImageRegistry(key); % % The format of the DeleteMagickRegistry method is: % % MagickBooleanType DeleteMagickRegistry(const ssize_t id) % % A description of each parameter follows: % % o id: the registry id. 
% */ MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id) { char key[MaxTextExtent]; (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id); return(DeleteImageRegistry(key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C o n s t i t u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyConstitute() destroys the constitute component. % % The format of the DestroyConstitute method is: % % DestroyConstitute(void) % */ MagickExport void DestroyConstitute(void) { ConstituteComponentTerminus(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMagickRegistry() deallocates memory associated the magick registry. % % Deprecated, replace with: % % RegistryComponentTerminus(); % % The format of the DestroyMagickRegistry method is: % % void DestroyMagickRegistry(void) % */ MagickExport void DestroyMagickRegistry(void) { RegistryComponentTerminus(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s c r i b e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DescribeImage() describes an image by printing its attributes to the file. % Attributes include the image width, height, size, and others. % % Deprecated, replace with: % % IdentifyImage(image,file,verbose); % % The format of the DescribeImage method is: % % MagickBooleanType DescribeImage(Image *image,FILE *file, % const MagickBooleanType verbose) % % A description of each parameter follows: % % o image: the image. % % o file: the file, typically stdout. % % o verbose: A value other than zero prints more detailed information % about the image. 
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
  const MagickBooleanType verbose)
{
  return(IdentifyImage(image,file,verbose));
}

/*
  DestroyImageAttributes() deallocates memory associated with the image
  attribute list.

  o image: the image.
*/
MagickExport void DestroyImageAttributes(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->attributes == (void *) NULL)
    return;
  image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
    image->attributes);
}

/*
  DestroyImages() destroys an image list.  Deprecated; use
  DestroyImageList(image) instead.

  o image: the image sequence.
% */ MagickExport void DestroyImages(Image *image) { if (image == (Image *) NULL) return; if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3"); image=DestroyImageList(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M a g i c k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMagick() destroys the ImageMagick environment. % % Deprecated, replace with: % % MagickCoreTerminus(); % % The format of the DestroyMagick function is: % % DestroyMagick(void) % */ MagickExport void DestroyMagick(void) { MagickCoreTerminus(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D i s p a t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DispatchImage() extracts pixel data from an image and returns it to you. % The method returns MagickFalse on success otherwise MagickTrue if an error is % encountered. The data is returned as char, short int, int, ssize_t, float, % or double in the order specified by map. % % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception); % % Deprecated, replace with: % % ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels, % exception); % % The format of the DispatchImage method is: % % unsigned int DispatchImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,const size_t columns, % const size_t rows,const char *map,const StorageType type, % void *pixels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset, y_offset, columns, rows: These values define the perimeter % of a region of pixels you want to extract. 
% % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or % I = intensity (for grayscale). % % o type: Define the data type of the pixels. Float and double types are % normalized to [0..1] otherwise [0..QuantumRange]. Choose from these % types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or % DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,const size_t columns,const size_t rows, const char *map,const StorageType type,void *pixels,ExceptionInfo *exception) { unsigned int status; if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6"); status=ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels, exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t r a c t S u b i m a g e F r o m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtractSubimageFromImageImage() extracts a region of the image that most % closely resembles the reference. % % The format of the ExtractSubimageFromImageImage method is: % % Image *ExtractSubimageFromImage(const Image *image, % const Image *reference,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  GetSimilarityMetric() computes a normalized root-mean-square difference
  between `reference' and the same-sized window of `image' anchored at
  (x_offset,y_offset).  Scanning stops early once the running metric exceeds
  similarity_threshold (the caller's best match so far), since this window
  can no longer improve on it.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reference_view;

  double
    channels,
    normalized_similarity,
    similarity;

  ssize_t
    y;

  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  /*
    Channel count: RGB, plus opacity and/or black when both images carry them.
  */
  channels=3;
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    register const IndexPacket
      *indexes,
      *reference_indexes;

    register const PixelPacket
      *p,
      *q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    /* Unreadable rows are silently skipped rather than treated as an error */
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;

      /*
        Accumulate the squared, quantum-scaled difference per channel.
      */
      pixel=QuantumScale*(GetPixelRed(p)-(double) GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double) GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double) GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    /* Normalize by window area and channel count after each row */
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}

/*
  ExtractSubimageFromImage() scans every candidate window position and crops
  the one whose similarity metric is lowest, provided it beats the fuzz-based
  acceptance threshold; otherwise NULL is returned.
*/
MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  double
    similarity_threshold;

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  /*
    Seed the threshold with a value no real window can exceed.
  */
  similarity_threshold=(double) image->columns*image->rows;
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      /*
        NOTE(review): similarity_threshold is read here outside the critical
        section below; under OpenMP that read can be stale, which only makes
        the early-exit hint weaker — confirm this is intended.
      */
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /* Reject the best match when it is still worse than the fuzz tolerance */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}

/*
  FlattenImages() is an obsolete function: use MergeImageLayers() instead.

  o image: the image sequence.

  o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  return(MergeImageLayers(image,FlattenLayer,exception));
}

/*
  FormatImageAttributeList() formats a va_list into a bounded buffer and
  stores the result as the image attribute named `key'.  Returns the status
  of SetImageProperty().
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  /*
    NOTE(review): vsprintf has no bound; this branch relies on callers
    keeping the formatted result under MaxTextExtent — confirm.
  */
  n=vsprintf(value,format,operands);
#endif
  /* On formatter error, force termination so value is a usable C string */
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,key,value));
}

/*
  FormatImagePropertyList() is the property-named twin of
  FormatImageAttributeList(): format a va_list and store it under `property'.
*/
MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,property,value));
}

/*
  FormatImageAttribute() permits formatted key/value pairs to be saved as an
  image attribute (variadic front end).
*/
MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  char
    value[MaxTextExtent];

  int
    n;

  va_list
    operands;

  va_start(operands,format);
  n=FormatLocaleStringList(value,MaxTextExtent,format,operands);
  (void) n;
  va_end(operands);
  return(SetImageProperty(image,key,value));
}

/*
  FormatMagickStringList() prints formatted output of a va_list into `string'
  (at most `length' bytes) and returns the vsnprintf-style count.
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,length,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[length-1]='\0';
  return((ssize_t) n);
}

/*
  FormatMagickString() is the variadic front end to FormatMagickStringList().
*/
MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  ssize_t
    n;

  va_list
    operands;

  va_start(operands,format);
  n=(ssize_t) FormatMagickStringList(string,length,format,operands);
  va_end(operands);
  return(n);
}

/*
  FormatString() prints formatted output of a variable argument list into
  `string', which must hold at least MaxTextExtent bytes.

  A description of each parameter follows.
%
%  o string: receives the formatted result (at least MaxTextExtent bytes).
%
%  o format: a printf-style format describing the remaining arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  int
    n;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,MaxTextExtent,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  /* On formatter error, force termination so string is a usable C string */
  if (n < 0)
    string[MaxTextExtent-1]='\0';
}

MagickExport void FormatString(char *string,const char *format,...)
{
  va_list
    operands;

  va_start(operands,format);
  /*
    NOTE(review): delegates to FormatLocaleStringList(), not the
    FormatStringList() wrapper above — confirm the locale-aware variant is
    intended here.
  */
  (void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
  va_end(operands);
  return;
}

/*
  FuzzyColorMatch() returns true if two pixels are identical in color within
  the given fuzz distance.

  o p: Pixel p.

  o q: Pixel q.

  o distance: Define how much tolerance is acceptable to consider
    two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  MagickRealType
    delta,
    distance,
    squared_fuzz;

  /*
    Fast path: exact RGB comparison when no tolerance is requested.
  */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /*
    Accumulate the squared Euclidean distance channel by channel, bailing
    out as soon as it exceeds the squared fuzz radius.
  */
  squared_fuzz=fuzz*fuzz;
  delta=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  distance=delta*delta;
  if (distance > squared_fuzz)
    return(MagickFalse);
  delta=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  distance+=delta*delta;
  if (distance > squared_fuzz)
    return(MagickFalse);
  delta=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  distance+=delta*delta;
  if (distance > squared_fuzz)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  FuzzyColorCompare() returns MagickTrue if the distance between two colors
  is less than the specified distance in a linear three dimensional color
  space.  This method is used by ColorFloodFill() and other algorithms which
  compare two colors.

  o image: the image.

  o p: Pixel p.

  o q: Pixel q.
% */ MagickExport MagickBooleanType FuzzyColorCompare(const Image *image, const PixelPacket *p,const PixelPacket *q) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5"); return(IsColorSimilar(image,p,q)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F u z z y O p a c i t y C o m p a r e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FuzzyOpacityCompare() returns true if the distance between two opacity % values is less than the specified distance in a linear color space. This % method is used by MatteFloodFill() and other algorithms which compare % two opacity values. % % Deprecated, replace with: % % IsOpacitySimilar(image,p,q); % % The format of the FuzzyOpacityCompare method is: % % void FuzzyOpacityCompare(const Image *image,const PixelPacket *p, % const PixelPacket *q) % % A description of each parameter follows: % % o image: the image. % % o p: Pixel p. % % o q: Pixel q. % */ MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image, const PixelPacket *p,const PixelPacket *q) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5"); return(IsOpacitySimilar(image,p,q)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t C o n f i g u r e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetConfigureBlob() returns the specified configure file as a blob. % % The format of the GetConfigureBlob method is: % % void *GetConfigureBlob(const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o filename: the configure file name. % % o path: return the full path information of the configure file. % % o length: This pointer to a size_t integer sets the initial length of the % blob. On return, it reflects the actual length of the blob. 
%
%  o exception: return any errors or warnings in this structure.
%
*/
/*
  GetConfigureBlob() returns the specified configure file as a blob, probing
  a series of well-known locations in order; `path' receives the full path
  of the file that was actually read.
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  void
    *blob;

  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      char
        *key_value;

      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",key_value,
            DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0,length,exception);
        }
    }
#endif
#else
  if (blob == (void *) NULL)
    {
      char
        *home;

      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) && (blob == (void *) NULL))
            blob=FileToBlob(path,~0,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];

      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),
        MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}

/*
  GetCacheView() gets pixels from the in-memory or disk pixel cache as
  defined by the geometry parameters.  A pointer to the pixels is returned
  if the pixels are transferred, otherwise NULL.  Deprecated; use
  GetCacheViewAuthenticPixels() instead.
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view));
  return(pixels);
}

/*
  GetCacheViewIndexes() returns the indexes associated with the specified
  view.
% % Deprecated, replace with: % % GetCacheViewAuthenticIndexQueue(cache_view); % % The format of the GetCacheViewIndexes method is: % % IndexPacket *GetCacheViewIndexes(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view) { return(GetCacheViewAuthenticIndexQueue(cache_view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as % defined by the geometry parameters. A pointer to the pixels is returned if % the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, % GetCacheViewException(cache_view)); % % The format of the GetCacheViewPixels method is: % % PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows) { PixelPacket *pixels; pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, GetCacheViewException(cache_view)); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAttribute() searches the list of image attributes and returns % a pointer to the attribute if it exists otherwise NULL. 
% % The format of the GetImageAttribute method is: % % const ImageAttribute *GetImageAttribute(const Image *image, % const char *key) % % A description of each parameter follows: % % o image: the image. % % o key: These character strings are the name of an image attribute to % return. % */ static void *DestroyAttribute(void *attribute) { register ImageAttribute *p; p=(ImageAttribute *) attribute; if (p->value != (char *) NULL) p->value=DestroyString(p->value); return(RelinquishMagickMemory(p)); } MagickExport const ImageAttribute *GetImageAttribute(const Image *image, const char *key) { const char *value; ImageAttribute *attribute; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); value=GetImageProperty(image,key); if (value == (const char *) NULL) return((const ImageAttribute *) NULL); if (image->attributes == (void *) NULL) ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory,DestroyAttribute); else { const ImageAttribute *attribute; attribute=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *) image->attributes,key); if (attribute != (const ImageAttribute *) NULL) return(attribute); } attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute)); if (attribute == (ImageAttribute *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(attribute,0,sizeof(*attribute)); attribute->key=ConstantString(key); attribute->value=ConstantString(value); (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes, attribute->key,attribute); return((const ImageAttribute *) attribute); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C l i p p i n g P a t h A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageClippingPathAttribute() searches the list of image attributes and % returns a pointer to a 
clipping path if it exists otherwise NULL. % % Deprecated, replace with: % % GetImageAttribute(image,"8BIM:1999,2998"); % % The format of the GetImageClippingPathAttribute method is: % % const ImageAttribute *GetImageClippingPathAttribute(Image *image) % % A description of each parameter follows: % % o attribute: Method GetImageClippingPathAttribute returns the clipping % path if it exists otherwise NULL. % % o image: the image. % */ MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image) { return(GetImageAttribute(image,"8BIM:1999,2998")); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e F r o m M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageFromMagickRegistry() gets an image from the registry as defined by % its name. If the image is not found, a NULL image is returned. % % Deprecated, replace with: % % GetImageRegistry(ImageRegistryType,name,exception); % % The format of the GetImageFromMagickRegistry method is: % % Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, % ExceptionInfo *exception) % % A description of each parameter follows: % % o name: the name of the image to retrieve from the registry. % % o id: the registry id. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, ExceptionInfo *exception) { *id=0L; return((Image *) GetImageRegistry(ImageRegistryType,name,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickRegistry() gets a blob from the registry as defined by the id. If % the blob that matches the id is not found, NULL is returned. 
%
%  The format of the GetMagickRegistry method is:
%
%      const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
%        size_t *length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o id: the registry id.
%
%    o type: the registry type.
%
%    o length: the blob length in number of bytes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  void
    *blob;

  /*
    Deprecated id-keyed lookup: outputs default to "not found" and the numeric
    id is rendered as a string key.  NOTE(review): the key carries a trailing
    '\n' — presumably this matches how the key was originally formed; verify
    against the corresponding Set routine before changing it.  Also note that
    *type and *length are never updated on a successful lookup here.
  */
  *type=UndefinedRegistryType;
  *length=0;
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  /* Probe each registry partition in turn; the first hit wins. */
  blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  return((void *) GetImageRegistry(UndefinedRegistryType,key,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e G e o m e t r y                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageGeometry() returns a region as defined by the geometry string with
%  respect to the image and its gravity.
%
%  Deprecated, replace with:
%
%    if (size_to_fit != MagickFalse)
%      ParseRegionGeometry(image,geometry,region_info,&image->exception); else
%      ParsePageGeometry(image,geometry,region_info,&image->exception);
%
%  The format of the GetImageGeometry method is:
%
%      int GetImageGeometry(Image *image,const char *geometry,
%        const unsigned int size_to_fit,RectangleInfo *region_info)
%
%  A description of each parameter follows:
%
%    o flags: Method GetImageGeometry returns a bitmask that indicates
%      which of the four values were located in the geometry string.
%
%    o geometry: The geometry (e.g. 100x100+10+10).
%
%    o size_to_fit: A value other than 0 means to scale the region so it
%      fits within the specified width and height.
%
%    o region_info: the region as defined by the geometry string with
%      respect to the image and its gravity.
%
*/
MagickExport int GetImageGeometry(Image *image,const char *geometry,
  const unsigned int size_to_fit,RectangleInfo *region_info)
{
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4");
  /* Page-style parse when not scaling to fit, region-style parse otherwise. */
  if (size_to_fit == MagickFalse)
    return((int) ParsePageGeometry(image,geometry,region_info,
      &image->exception));
  return((int) ParseRegionGeometry(image,geometry,region_info,
    &image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e L i s t                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageList() returns an image at the specified position in the list.
%
%  Deprecated, replace with:
%
%    CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
%      exception);
%
%  The format of the GetImageList method is:
%
%      Image *GetImageList(const Image *images,const ssize_t offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
%    o offset: the position within the list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageList(const Image *images,const ssize_t offset,
  ExceptionInfo *exception)
{
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /* Deep-copy the image found at the requested list position. */
  return(CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e L i s t I n d e x                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageListIndex() returns the position in the list of the specified
%  image.
%
%  Deprecated, replace with:
%
%    GetImageIndexInList(images);
%
%  The format of the GetImageListIndex method is:
%
%      ssize_t GetImageListIndex(const Image *images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport ssize_t GetImageListIndex(const Image *images)
{
  ssize_t
    position;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  position=GetImageIndexInList(images);
  return(position);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e L i s t S i z e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageListSize() returns the number of images in the list.
%
%  Deprecated, replace with:
%
%    GetImageListLength(images);
%
%  The format of the GetImageListSize method is:
%
%      size_t GetImageListSize(const Image *images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport size_t GetImageListSize(const Image *images)
{
  size_t
    length;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  length=GetImageListLength(images);
  return(length);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e P i x e l s                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a PixelPacket array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in RAM, or in a memory-mapped file.  The returned pointer
%  should *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or if the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
%  to obtain the black color component or colormap indexes (of type
%  IndexPacket) corresponding to the region.  Once the PixelPacket (and/or
%  IndexPacket) array has been updated, the changes must be saved back to the
%  underlying image using SyncAuthenticPixels() or they may be lost.
%
%  Deprecated, replace with:
%
%    GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
%  The format of the GetImagePixels() method is:
%
%      PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
% */ MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows) { return(GetAuthenticPixels(image,x,y,columns,rows,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetIndexes() returns the black channel or the colormap indexes associated % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the black channel or colormap indexes are not available. % % Deprecated, replace with: % % GetAuthenticIndexQueue(image); % % The format of the GetIndexes() method is: % % IndexPacket *GetIndexes(const Image *image) % % A description of each parameter follows: % % o indexes: GetIndexes() returns the indexes associated with the last % call to QueueAuthenticPixels() or GetAuthenticPixels(). % % o image: the image. % */ MagickExport IndexPacket *GetIndexes(const Image *image) { return(GetAuthenticIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t M a g i c k G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickGeometry() is similar to GetGeometry() except the returned % geometry is modified as determined by the meta characters: %, !, <, >, % and ~. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,x,y,width,height); % % The format of the GetMagickGeometry method is: % % unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y, % size_t *width,size_t *height) % % A description of each parameter follows: % % o geometry: Specifies a character string representing the geometry % specification. % % o x,y: A pointer to an integer. The x and y offset as determined by % the geometry specification is returned here. 
%
%    o width,height:  A pointer to an unsigned integer.  The width and height
%      as determined by the geometry specification is returned here.
%
*/
MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,
  ssize_t *y,size_t *width,size_t *height)
{
  unsigned int
    flags;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  /* Flags report which geometry components were present in the string. */
  flags=ParseMetaGeometry(geometry,x,y,width,height);
  return(flags);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t N e x t I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImage() returns the next image in a list.
%
%  Deprecated, replace with:
%
%    GetNextImageInList(images);
%
%  The format of the GetNextImage method is:
%
%      Image *GetNextImage(const Image *images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport Image *GetNextImage(const Image *images)
{
  Image
    *next;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  next=GetNextImageInList(images);
  return(next);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t N e x t I m a g e A t t r i b u t e                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageAttribute() gets the next image attribute.
%
%  Deprecated, replace with:
%
%    const char *property;
%    property=GetNextImageProperty(image);
%    if (property != (const char *) NULL)
%      GetImageAttribute(image,property);
%
%  The format of the GetNextImageAttribute method is:
%
%      const ImageAttribute *GetNextImageAttribute(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image) { const char *property; property=GetNextImageProperty(image); if (property == (const char *) NULL) return((const ImageAttribute *) NULL); return(GetImageAttribute(image,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N u m b e r S c e n e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNumberScenes() returns the number of images in the list. % % Deprecated, replace with: % % GetImageListLength(image); % % The format of the GetNumberScenes method is: % % unsigned int GetNumberScenes(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport unsigned int GetNumberScenes(const Image *image) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return((unsigned int) GetImageListLength(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOnePixel() returns a single pixel at the specified (x,y) location. % The image background color is returned if an error occurs. % % Deprecated, replace with: % % GetOneAuthenticPixel(image,x,y,&pixel,&image->exception); % % The format of the GetOnePixel() method is: % % PixelPacket GetOnePixel(const Image image,const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. 
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,
  const ssize_t y)
{
  PixelPacket
    one_pixel;

  /* Errors are reported via image->exception; the pixel is returned as-is. */
  (void) GetOneAuthenticPixel(image,x,y,&one_pixel,&image->exception);
  return(one_pixel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l s                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixels() returns the pixels associated with the last call to
%  QueueAuthenticPixels() or GetAuthenticPixels().
%
%  Deprecated, replace with:
%
%    GetAuthenticPixelQueue(image);
%
%  The format of the GetPixels() method is:
%
%      PixelPacket *GetPixels(const Image *image)
%
%  A description of each parameter follows:
%
%    o pixels: GetPixels() returns the pixels associated with the last call
%      to QueueAuthenticPixels() or GetAuthenticPixels().
%
%    o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
  PixelPacket
    *pixels;

  pixels=GetAuthenticPixelQueue(image);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P r e v i o u s I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPreviousImage() returns the previous image in a list.
%
%  Deprecated, replace with:
%
%    GetPreviousImageInList(images));
%
%  The format of the GetPreviousImage method is:
%
%      Image *GetPreviousImage(const Image *images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(GetPreviousImageInList(images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   H S L T r a n s f o r m                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
%  blue) triple.
%
%  The format of the HSLTransformImage method is:
%
%      void HSLTransform(const double hue,const double saturation,
%        const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
%  A description of each parameter follows:
%
%    o hue, saturation, lightness: A double value representing a
%      component of the HSL color space.
%
%    o red, green, blue: A pointer to a pixel component of type Quantum.
%
*/

/*
  Piecewise hue-to-channel helper for HSL->RGB conversion; hue is normalized
  to [0,1) and m1/m2 are the lightness-derived mixing terms.
*/
static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2,
  MagickRealType hue)
{
  /* Wrap hue into [0,1]. */
  if (hue < 0.0)
    hue+=1.0;
  if (hue > 1.0)
    hue-=1.0;
  if ((6.0*hue) < 1.0)
    return(m1+6.0*(m2-m1)*hue);
  if ((2.0*hue) < 1.0)
    return(m2);
  if ((3.0*hue) < 2.0)
    return(m1+6.0*(m2-m1)*(2.0/3.0-hue));
  return(m1);
}

MagickExport void HSLTransform(const double hue,const double saturation,
  const double lightness,Quantum *red,Quantum *green,Quantum *blue)
{
  MagickRealType
    b,
    g,
    r,
    m1,
    m2;

  /*
    Convert HSL to RGB colorspace.  Inputs are assumed normalized to [0,1];
    outputs are clamped to the Quantum range.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  if (lightness <= 0.5)
    m2=lightness*(saturation+1.0);
  else
    m2=lightness+saturation-lightness*saturation;
  m1=2.0*lightness-m2;
  /* Red, green and blue sample the hue circle a third of a turn apart. */
  r=HueToRGB(m1,m2,hue+1.0/3.0);
  g=HueToRGB(m1,m2,hue);
  b=HueToRGB(m1,m2,hue-1.0/3.0);
  *red=ClampToQuantum((MagickRealType) QuantumRange*r);
  *green=ClampToQuantum((MagickRealType) QuantumRange*g);
  *blue=ClampToQuantum((MagickRealType) QuantumRange*b);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i t y A f f i n e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentityAffine() initializes the affine transform to the identity matrix.
%
%  The format of the IdentityAffine method is:
%
%      IdentityAffine(AffineMatrix *affine)
%
%  A description of each parameter follows:
%
%    o affine: A pointer the affine transform of type AffineMatrix.
%
*/
MagickExport void IdentityAffine(AffineMatrix *affine)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(affine != (AffineMatrix *) NULL);
  /* Zero every coefficient, then set the unit scale terms. */
  (void) ResetMagickMemory(affine,0,sizeof(AffineMatrix));
  affine->sx=1.0;
  affine->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n i t i a l i z e M a g i c k                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeMagick() initializes the ImageMagick environment.
%
%  Deprecated, replace with:
%
%    MagickCoreGenesis(path,MagickFalse);
%
%  The format of the InitializeMagick function is:
%
%      InitializeMagick(const char *path)
%
%  A description of each parameter follows:
%
%    o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void InitializeMagick(const char *path)
{
  MagickCoreGenesis(path,MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n t e r p o l a t e P i x e l C o l o r                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolatePixelColor() applies bi-linear or tri-linear interpolation
%  between a pixel and it's neighbors.
%
%  The format of the InterpolatePixelColor method is:
%
%      MagickPixelPacket InterpolatePixelColor(const Image *image,
%        CacheView *view_info,InterpolatePixelMethod method,const double x,
%        const double y,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o image_view: the image cache view.
%
%    o type: the type of pixel color interpolation.
%
%    o x,y: A double representing the current (x,y) position of the pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

/*
  Catmull-Rom-style cubic interpolation of four neighboring pixels at
  fractional offset dx; each channel is interpolated independently.
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
  MagickPixelPacket *pixel)
{
  MagickRealType
    dx2,
    p,
    q,
    r,
    s;

  dx2=dx*dx;
  p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
  q=(pixels[0].red-pixels[1].red)-p;
  r=pixels[2].red-pixels[0].red;
  s=pixels[1].red;
  pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
  q=(pixels[0].green-pixels[1].green)-p;
  r=pixels[2].green-pixels[0].green;
  s=pixels[1].green;
  pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
  q=(pixels[0].blue-pixels[1].blue)-p;
  r=pixels[2].blue-pixels[0].blue;
  s=pixels[1].blue;
  pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
  q=(pixels[0].opacity-pixels[1].opacity)-p;
  r=pixels[2].opacity-pixels[0].opacity;
  s=pixels[1].opacity;
  pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  if (pixel->colorspace == CMYKColorspace)
    {
      /* CMYK keeps its black channel in the index slot. */
      p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
      q=(pixels[0].index-pixels[1].index)-p;
      r=pixels[2].index-pixels[0].index;
      s=pixels[1].index;
      pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
    }
}

/*
  Cubic B-spline basis weight for offset x (support [-2,2]); used by the
  spline interpolation case below.
*/
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
  MagickRealType
    alpha,
    gamma;

  alpha=MagickMax(x+2.0,0.0);
  gamma=1.0*alpha*alpha*alpha;
  alpha=MagickMax(x+1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  alpha=MagickMax(x+0.0,0.0);
  gamma+=6.0*alpha*alpha*alpha;
  alpha=MagickMax(x-1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  return(gamma/6.0);
}

/* Barycentric interpolation over a triangle: p anchors, x and y are edges. */
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}

/* Round to the nearest integer coordinate (half away from zero). */
static inline ssize_t NearestNeighbor(MagickRealType x)
{
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
  CacheView *image_view,const InterpolatePixelMethod method,const double x,
  const double y,ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image_view != (CacheView *) NULL);
  GetMagickPixelPacket(image,&pixel);
  switch (method)
  {
    case AverageInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      /* Uniform average of the 4x4 neighborhood, alpha-weighted. */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            /* Premultiply color channels by alpha before averaging. */
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        gamma=alpha[i];
        gamma=PerceptibleReciprocal(gamma);
        pixel.red+=gamma*0.0625*pixels[i].red;
        pixel.green+=gamma*0.0625*pixels[i].green;
        pixel.blue+=gamma*0.0625*pixels[i].blue;
        pixel.opacity+=0.0625*pixels[i].opacity;
        if (image->colorspace == CMYKColorspace)
          pixel.index+=gamma*0.0625*pixels[i].index;
        p++;
      }
      break;
    }
    case BicubicInterpolatePixel:
    {
      MagickPixelPacket
        pixels[16],
        u[4];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      /* Separable bicubic: interpolate each row, then across rows. */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      for (i=0; i < 4L; i++)
        BicubicInterpolate(pixels+4*i,delta.x,u+i);
      delta.y=y-floor(y);
      BicubicInterpolate(u,delta.y,&pixel);
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      /* Bilinear blend of the 2x2 neighborhood (the default method). */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      /* gamma undoes the alpha premultiply of the blended result. */
      gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y*
        ((1.0-delta.x)*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x*
        pixels[3].red));
      pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+
        delta.x*pixels[3].green));
      pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x*
        pixels[3].blue));
      pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x*
        pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x*
        pixels[3].opacity));
      if (image->colorspace == CMYKColorspace)
        pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x*
          pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x*
          pixels[3].index));
      break;
    }
    case FilterInterpolatePixel:
    {
      Image
        *excerpt_image,
        *filter_image;

      MagickPixelPacket
        pixels[1];

      RectangleInfo
        geometry;

      /*
        Resize a 4x4 excerpt down to a single pixel with the image's
        configured resize filter, then read that pixel back.
      */
      geometry.width=4L;
      geometry.height=4L;
      geometry.x=(ssize_t) floor(x)-1L;
      geometry.y=(ssize_t) floor(y)-1L;
      excerpt_image=ExcerptImage(image,&geometry,exception);
      if (excerpt_image == (Image *) NULL)
        break;
      filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur,
        exception);
      excerpt_image=DestroyImage(excerpt_image);
      if (filter_image == (Image *) NULL)
        break;
      p=GetVirtualPixels(filter_image,0,0,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          filter_image=DestroyImage(filter_image);
          break;
        }
      indexes=GetVirtualIndexQueue(filter_image);
      /*
        NOTE(review): pixels[0] is initialized but the result is written to
        &pixel directly — confirm pixels[] is intentionally unused here.
      */
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      filter_image=DestroyImage(filter_image);
      break;
    }
    case IntegerInterpolatePixel:
    {
      MagickPixelPacket
        pixels[1];

      /* Truncate to the containing pixel; no blending. */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[4];

      MagickRealType
        alpha[4];

      PointInfo
        delta,
        luminance;

      /*
        Split the 2x2 cell into two triangles along the diagonal with the
        smaller luminance change, then interpolate barycentrically within
        the triangle that contains (x,y).
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      luminance.x=MagickPixelLuminance(pixels+0)-MagickPixelLuminance(pixels+3);
      luminance.y=MagickPixelLuminance(pixels+1)-MagickPixelLuminance(pixels+2);
      if (fabs(luminance.x) < fabs(luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel:2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity,
                pixels[3].opacity,pixels[0].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index,
                  pixels[3].index,pixels[0].index);
            }
          else
            {
              /*
                Top-right triangle (pixel:1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity,
                pixels[0].opacity,pixels[3].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index,
                  pixels[0].index,pixels[3].index);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity,
                pixels[1].opacity,pixels[2].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index,
                  pixels[1].index,pixels[2].index);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity,
                pixels[2].opacity,pixels[1].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index,
                  pixels[2].index,pixels[1].index);
            }
        }
      break;
    }
    case NearestNeighborInterpolatePixel:
    {
      MagickPixelPacket
        pixels[1];

      /* Round to the closest pixel center; no blending. */
      p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x),
        NearestNeighbor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16],
        dx,
        dy;

      PointInfo
        delta;

      ssize_t
        j,
        n;

      /* Cubic B-spline weighted sum over the 4x4 neighborhood. */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      n=0;
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      for (i=(-1); i < 3L; i++)
      {
        dy=CubicWeightingFunction((MagickRealType) i-delta.y);
        for (j=(-1); j < 3L; j++)
        {
          GetMagickPixelPacket(image,pixels+n);
          SetMagickPixelPacket(image,p,indexes+n,pixels+n);
          alpha[n]=1.0;
          if (image->matte != MagickFalse)
            {
              alpha[n]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
              pixels[n].red*=alpha[n];
              pixels[n].green*=alpha[n];
              pixels[n].blue*=alpha[n];
              if (image->colorspace == CMYKColorspace)
                pixels[n].index*=alpha[n];
            }
          dx=CubicWeightingFunction(delta.x-(MagickRealType) j);
          gamma=alpha[n];
          gamma=PerceptibleReciprocal(gamma);
          pixel.red+=gamma*dx*dy*pixels[n].red;
          pixel.green+=gamma*dx*dy*pixels[n].green;
          pixel.blue+=gamma*dx*dy*pixels[n].blue;
          if (image->matte != MagickFalse)
            pixel.opacity+=dx*dy*pixels[n].opacity;
          if (image->colorspace == CMYKColorspace)
            pixel.index+=gamma*dx*dy*pixels[n].index;
          n++;
          p++;
        }
      }
      break;
    }
  }
  return(pixel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n t e r p r e t I m a g e A t t r i b u t e s                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageAttributes() replaces any embedded formatting characters with
%  the appropriate image attribute and returns the translated text.
%
%  Deprecated, replace with:
%
%    InterpretImageProperties(image_info,image,embed_text);
%
%  The format of the InterpretImageAttributes method is:
%
%      char *InterpretImageAttributes(const ImageInfo *image_info,Image *image,
%        const char *embed_text)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o embed_text: the address of a character string containing the embedded
%      formatting characters.
%
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
  Image *image,const char *embed_text)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  return(InterpretImageProperties(image_info,image,embed_text));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n v e r s e s R G B C o m p a n d o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InversesRGBCompandor() removes the gamma function from a sRGB pixel.
%
%  The format of the InversesRGBCompandor method is:
%
%      MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel.
% */ MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel) { if (pixel <= (0.0404482362771076*QuantumRange)) return(pixel/12.92); return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I s S u b i m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsSubimage() returns MagickTrue if the geometry is a valid subimage % specification (e.g. [1], [1-9], [1,7,4]). % % The format of the IsSubimage method is: % % unsigned int IsSubimage(const char *geometry,const unsigned int pedantic) % % A description of each parameter follows: % % o geometry: This string is the geometry specification. % % o pedantic: A value other than 0 invokes a more restrictive set of % conditions for a valid specification (e.g. [1], [1-4], [4-1]). % */ MagickExport unsigned int IsSubimage(const char *geometry, const unsigned int pedantic) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (geometry == (const char *) NULL) return(MagickFalse); if ((strchr(geometry,'x') != (char *) NULL) || (strchr(geometry,'X') != (char *) NULL)) return(MagickFalse); if ((pedantic != MagickFalse) && (strchr(geometry,',') != (char *) NULL)) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColor() will map the given color to "black" and "white" % values, limearly spreading out the colors, and level values on a channel by % channel bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. 
That is any existing "black" and "white" colors in the
%  image will become the color values given, with all other values compressed
%  appropriately.  This effectively maps a greyscale gradient into the given
%  color gradient.
%
%  Deprecated, replace with:
%
%    LevelColorsImageChannel(image,channel,black_color,white_color,invert);
%
%  The format of the LevelImageColors method is:
%
%    MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
%      const MagickPixelPacket *black_color,
%      const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from
%
%    o white_point: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  /*
    Thin deprecated wrapper: forward directly to LevelColorsImageChannel().
    MagickExport added for consistency with the other public deprecated
    wrappers in this file (the symbol was otherwise not exported on Windows
    builds).
  */
  return(LevelColorsImageChannel(image,channel,black_color,white_color,
    invert));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i b e r a t e M e m o r y                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiberateMemory() frees memory that has already been allocated, and NULL's
%  the pointer to it.
%
%  The format of the LiberateMemory method is:
%
%      void LiberateMemory(void **memory)
%
%  A description of each parameter follows:
%
%    o memory: A pointer to a block of memory to free for reuse.
%
*/
MagickExport void LiberateMemory(void **memory)
{
  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /*
    Free the referenced block (if any) and clear the caller's pointer so a
    repeated LiberateMemory() on the same handle is harmless.
  */
  if (*memory != (void *) NULL)
    {
      free(*memory);
      *memory=(void *) NULL;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i b e r a t e S e m a p h o r e I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiberateSemaphoreInfo() relinquishes a semaphore.
%
%  Deprecated, replace with:
%
%    UnlockSemaphoreInfo(*semaphore_info);
%
%  The format of the LiberateSemaphoreInfo method is:
%
%      LiberateSemaphoreInfo(void **semaphore_info)
%
%  A description of each parameter follows:
%
%    o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  /*
    Deprecated alias for UnlockSemaphoreInfo().
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  UnlockSemaphoreInfo(*semaphore_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k I n c a r n a t e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickIncarnate() initializes the ImageMagick environment.
%
%  Deprecated, replace with:
%
%    MagickCoreGenesis(path,MagickFalse);
%
%  The format of the MagickIncarnate function is:
%
%      MagickIncarnate(const char *path)
%
%  A description of each parameter follows:
%
%    o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void MagickIncarnate(const char *path)
{
  /* Deprecated: log the deprecation, then boot MagickCore (no CLI mode). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  MagickCoreGenesis(path,MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M o n i t o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMonitor() calls the monitor handler method with a text string that
%  describes the task and a measure of completion.  The method returns
%  MagickTrue on success otherwise MagickFalse if an error is encountered, e.g.
%  if there was a user interrupt.
%
%  The format of the MagickMonitor method is:
%
%      MagickBooleanType MagickMonitor(const char *text,
%        const MagickOffsetType offset,const MagickSizeType span,
%        void *client_data)
%
%  A description of each parameter follows:
%
%    o offset: the position relative to the span parameter which represents
%      how much progress has been made toward completing a task.
%
%    o span: the span relative to completing a task.
%
%    o client_data: the client data.
%
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
  const MagickOffsetType offset,const MagickSizeType span,
  void *magick_unused(client_data))
{
  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  assert(text != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
  /* Let pending GUI/system events run before invoking the handler. */
  ProcessPendingEvents(text);
  status=MagickTrue;
  exception=AcquireExceptionInfo();
  /*
    monitor_handler is the file-global handler installed elsewhere (via
    SetMonitorHandler) -- the exception it may raise is discarded here.
  */
  if (monitor_handler != (MonitorHandler) NULL)
    status=(*monitor_handler)(text,offset,span,exception);
  exception=DestroyExceptionInfo(exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a p I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MapImage() replaces the colors of an image with the closest color from a
%  reference image.
% % Deprecated, replace with: % % QuantizeInfo quantize_info; % GetQuantizeInfo(&quantize_info); % quantize_info.dither=dither; % RemapImage(&quantize_info,image,map_image); % % The format of the MapImage method is: % % MagickBooleanType MapImage(Image *image,const Image *map_image, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o map_image: the image. Reduce image to a set of colors represented by % this image. % % o dither: Set this integer value to something other than zero to % dither the mapped image. % */ MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image, const MagickBooleanType dither) { QuantizeInfo quantize_info; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(map_image != (Image *) NULL); assert(map_image->signature == MagickSignature); GetQuantizeInfo(&quantize_info); quantize_info.dither=dither; return(RemapImage(&quantize_info,image,map_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MapImages() replaces the colors of a sequence of images with the closest % color from a reference image. % % Deprecated, replace with: % % QuantizeInfo quantize_info; % GetQuantizeInfo(&quantize_info); % quantize_info.dither=dither; % RemapImages(&quantize_info,images,map_image); % % The format of the MapImage method is: % % MagickBooleanType MapImages(Image *images,Image *map_image, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to a set of Image structures. % % o map_image: the image. Reduce image to a set of colors represented by % this image. 
%
%    o dither: Set this integer value to something other than zero to
%      dither the quantized image.
%
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_info;

  /*
    Validate the image list, then remap every frame through a
    default-initialized QuantizeInfo carrying the requested dither setting.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImages(&remap_info,images,map_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a t t e F l o o d f i l l I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MatteFloodfill() changes the transparency value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod
%  is specified, the transparency value is changed for any neighbor pixel
%  that does not match the bordercolor member of image.
%
%  By default target must match a particular pixel transparency exactly.
%  However, in many cases two transparency values may differ by a
%  small amount.  The fuzz member of image defines how much tolerance is
%  acceptable to consider two transparency values as the same.  For example,
%  set fuzz to 10 and the opacity values of 100 and 102 respectively are
%  now interpreted as the same value for the purposes of the floodfill.
%
%  The format of the MatteFloodfillImage method is:
%
%      MagickBooleanType MatteFloodfillImage(Image *image,
%        const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
%        const ssize_t y_offset,const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o opacity: the level of transparency: 0 is fully opaque and QuantumRange
%      is fully transparent.
%
%    o x,y: the starting location of the operation.
% % o method: Choose either FloodfillMethod or FillToBorderMethod. % */ MagickExport MagickBooleanType MatteFloodfillImage(Image *image, const PixelPacket target,const Quantum opacity,const ssize_t x_offset, const ssize_t y_offset,const PaintMethod method) { Image *floodplane_image; MagickBooleanType skip; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, &image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); /* Set floodfill color. */ segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize, sizeof(*segment_stack)); if (segment_stack == (SegmentInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Push initial segment on stack. */ x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); while (s > segment_stack) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception); q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q--; p--; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetVirtualPixels(image,x,y,image->columns-x,1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q++; p++; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) != MagickFalse) break; } else if 
(IsColorSimilar(image,p,&target) == MagickFalse) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; /* Tile fill color onto floodplane. */ p=GetVirtualPixels(floodplane_image,0,y,image->columns,1, &image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) q->opacity=opacity; p++; q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaximumImages() returns the maximum intensity of an image sequence. % % Deprecated, replace with: % % EvaluateImages(images,MinEvaluateOperator,exception); % % The format of the MaxImages method is: % % Image *MaximumImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MinEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinimumImages() returns the minimum intensity of an image sequence. 
%
%  Deprecated, replace with:
%
%    EvaluateImages(images,MinEvaluateOperator,exception);
%
%  The format of the MinimumImages method is:
%
%      Image *MinimumImages(Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *minimum_image;

  /*
    Deprecated wrapper: the per-pixel minimum of the sequence.
  */
  minimum_image=EvaluateImages(images,MinEvaluateOperator,exception);
  return(minimum_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M e d i a n F i l t e r I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The algorithm was contributed by Mike Edmonds and implements an insertion
%  sort for selecting median color-channel values.  For more on this algorithm
%  see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
%  Pugh in the June 1990 of Communications of the ACM.
%
%  The format of the MedianFilterImage method is:
%
%      Image *MedianFilterImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated wrapper: a median filter is the MedianStatistic flavor of
    StatisticImage() over a radius x radius neighborhood.
  */
  return(StatisticImage(image,MedianStatistic,(size_t) radius,(size_t) radius,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d e I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModeImage() makes each pixel the 'predominant color' of the neighborhood
%  of the specified radius.
%
%  The format of the ModeImage method is:
%
%      Image *ModeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated wrapper: the ModeStatistic flavor of StatisticImage().
  */
  return(StatisticImage(image,ModeStatistic,(size_t) radius,(size_t) radius,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o s a i c I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MosaicImages() Obsolete Function: Use MergeImageLayers() instead.
%
%  Deprecated, replace with:
%
%    MergeImageLayers(image,MosaicLayer,exception);
%
%  The format of the MosaicImage method is:
%
%      Image *MosaicImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image list to be composited together
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  /* Deprecated alias: a mosaic is the MosaicLayer flavor of a layer merge. */
  return(MergeImageLayers(image,MosaicLayer,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   O p a q u e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the OpaqueImage method is:
%
%      MagickBooleanType OpaqueImage(Image *image,
%        const PixelPacket *target,const PixelPacket fill)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
  const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag  "Opaque/Image"

  MagickBooleanType
    proceed;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Make image color opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Make DirectClass image opaque: rewrite each matching pixel in place.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (IsColorSimilar(image,q,&target) != MagickFalse)
            *q=fill;
          q++;
        }
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
        /* Progress callback may request cancellation. */
        proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          break;
      }
      break;
    }
    case PseudoClass:
    {
      /*
        Make PseudoClass image opaque: rewrite matching colormap entries;
        opacity is per-pixel, so a non-opaque fill needs a pixel pass too.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
          image->colormap[i]=fill;
      }
      if (fill.opacity != OpaqueOpacity)
        {
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register ssize_t
              x;

            register PixelPacket
              *restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) != MagickFalse)
                q->opacity=fill.opacity;
              q++;
            }
            if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
              break;
          }
        }
      (void) SyncImage(image);
      break;
    }
  }
  if (fill.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   O p e n C a c h e V i e w                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenCacheView() opens a view into the pixel cache, using the
%  VirtualPixelMethod that is defined within the given image itself.
%
%  Deprecated, replace with:
%
%    AcquireVirtualCacheView(image,&image->exception);
%
%  The format of the OpenCacheView method is:
%
%      CacheView *OpenCacheView(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
  /* Cast away const to reach the image's embedded exception structure. */
  return(AcquireVirtualCacheView(image,&((Image *) image)->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   O p e n M a g i c k S t r e a m                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenMagickStream() opens the file at the specified path and return the
%  associated stream.
%
%  The format of the OpenMagickStream method is:
%
%      FILE *OpenMagickStream(const char *path,const char *mode)
%
%  A description of each parameter follows.
%
%    o path: the file path.
%
%    o mode: the file mode.
%
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
  register const unsigned char
    *p;

  /*
    Two modes: with a destination buffer, convert; with a NULL destination,
    only validate and count.  Either mode returns 0 on malformed input.
    NOTE(review): 4-byte UTF-8 sequences are rejected (return 0), so the
    caller falls back to a raw byte copy for them -- confirm acceptable.
  */
  if (utf16 != (wchar_t *) NULL)
    {
      register wchar_t
        *q;

      wchar_t
        c;

      /*
        Convert UTF-8 to UTF-16.
      */
      q=utf16;
      for (p=utf8; *p != '\0'; p++)
      {
        if ((*p & 0x80) == 0)
          *q=(*p);
        else
          if ((*p & 0xE0) == 0xC0)
            {
              /* 2-byte sequence: 110xxxxx 10xxxxxx. */
              c=(*p);
              *q=(c & 0x1F) << 6;
              p++;
              if ((*p & 0xC0) != 0x80)
                return(0);
              *q|=(*p & 0x3F);
            }
          else
            if ((*p & 0xF0) == 0xE0)
              {
                /*
                  3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
                  NOTE(review): the 0xE0 prefix bits are shifted past bit 15
                  here; this assumes they fall outside the stored value
                  (16-bit wchar_t, i.e. Windows -- the only platform with
                  _wfopen) -- confirm.
                */
                c=(*p);
                *q=c << 12;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                c=(*p);
                *q|=(c & 0x3F) << 6;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                *q|=(*p & 0x3F);
              }
            else
              return(0);
        q++;
      }
      *q++='\0';
      /* Length includes the terminating NUL. */
      return(q-utf16);
    }
  /*
    Compute UTF-16 string length (byte count is an upper bound; used only to
    size the allocation in ConvertUTF8ToUTF16).
  */
  for (p=utf8; *p != '\0'; p++)
  {
    if ((*p & 0x80) == 0)
      ;
    else
      if ((*p & 0xE0) == 0xC0)
        {
          p++;
          if ((*p & 0xC0) != 0x80)
            return(0);
        }
      else
        if ((*p & 0xF0) == 0xE0)
          {
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
          }
        else
          return(0);
  }
  return(p-utf8);
}

static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
  size_t
    length;

  wchar_t
    *utf16;

  /* Probe pass: validate and size; 0 means the input is not valid UTF-8. */
  length=UTF8ToUTF16(source,(wchar_t *) NULL);
  if (length == 0)
    {
      register ssize_t
        i;

      /*
        Not UTF-8, just copy (widen each byte; includes the NUL).
      */
      length=strlen((const char *) source);
      utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
      if (utf16 == (wchar_t *) NULL)
        return((wchar_t *) NULL);
      for (i=0; i <= (ssize_t) length; i++)
        utf16[i]=source[i];
      return(utf16);
    }
  utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
  if (utf16 == (wchar_t *) NULL)
    return((wchar_t *) NULL);
  length=UTF8ToUTF16(source,utf16);
  return(utf16);
}
#endif

MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
  FILE
    *file;

  if ((path == (const char *) NULL) || (mode == (const char *) NULL))
    {
      errno=EINVAL;
      return((FILE *) NULL);
    }
  file=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
  {
    wchar_t
      *unicode_mode,
      *unicode_path;

    /* Prefer the wide-character open so non-ASCII paths work on Windows. */
    unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path);
    if (unicode_path == (wchar_t *) NULL)
      return((FILE *) NULL);
    unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
    if (unicode_mode == (wchar_t *) NULL)
      {
        unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
        return((FILE *) NULL);
      }
    file=_wfopen(unicode_path,unicode_mode);
    unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode);
    unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
  }
#endif
  /* Fall back to the narrow fopen() when _wfopen is absent or failed. */
  if (file == (FILE *) NULL)
    file=fopen(path,mode);
  return(file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P a i n t F l o o d f i l l I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PaintFloodfill() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  By default target must match a particular pixel color exactly.
%  However, in many cases two colors may differ by a small amount.  The
%  fuzz member of image defines how much tolerance is acceptable to
%  consider two colors as the same.
For example, set fuzz to 10 and the
%  color red at intensities of 100 and 102 respectively are now
%  interpreted as the same color for the purposes of the floodfill.
%
%  Deprecated, replace with:
%
%    FloodfillPaintImage(image,channel,draw_info,target,x,y,
%      method == FloodfillMethod ? MagickFalse : MagickTrue);
%
%  The format of the PaintFloodfillImage method is:
%
%      MagickBooleanType PaintFloodfillImage(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const ssize_t x,const ssize_t y,const DrawInfo *draw_info,
%        const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o x,y: the starting location of the operation.
%
%    o draw_info: the draw info.
%
%    o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType PaintFloodfillImage(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,const ssize_t x,
  const ssize_t y,const DrawInfo *draw_info,const PaintMethod method)
{
  /*
    Deprecated wrapper: FillToBorderMethod maps to the 'invert' flavor of
    FloodfillPaintImage().
  */
  return(FloodfillPaintImage(image,channel,draw_info,target,x,y,
    method == FloodfillMethod ? MagickFalse : MagickTrue));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   P a i n t O p a q u e I m a g e                                           %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PaintOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
% % Deprecated, replace with: % % OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); % OpaquePaintImageChannel(image,channel,target,fill,MagickFalse); % % The format of the PaintOpaqueImage method is: % % MagickBooleanType PaintOpaqueImage(Image *image, % const PixelPacket *target,const PixelPacket *fill) % MagickBooleanType PaintOpaqueImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % */ MagickExport MagickBooleanType PaintOpaqueImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill) { MagickBooleanType status; status=OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); return(status); } MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill) { return(OpaquePaintImageChannel(image,channel,target,fill,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a i n t T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintTransparentImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  Deprecated, replace with:
%
%    TransparentPaintImage(image,target,opacity,MagickFalse);
%
%  The format of the PaintTransparentImage method is:
%
%      MagickBooleanType PaintTransparentImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType PaintTransparentImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward without match inversion.
  */
  status=TransparentPaintImage(image,target,opacity,MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P a r s e I m a g e G e o m e t r y                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ParseImageGeometry() is similar to GetGeometry() except the returned
%  geometry is modified as determined by the meta characters:  %, !, <,
%  and >.
%
%  Deprecated, replace with:
%
%    ParseMetaGeometry(geometry,x,y,width,height);
%
%  The format of the ParseImageGeometry method is:
%
%      int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y,
%        size_t *width,size_t *height)
%
%  A description of each parameter follows:
%
%    o flags:  Method ParseImageGeometry returns a bitmask that indicates
%      which of the four values were located in the geometry string.
%
%    o image_geometry:  Specifies a character string representing the geometry
%      specification.
%
%    o x,y:  A pointer to an integer.  The x and y offset as determined by
%      the geometry specification is returned here.
%
%    o width,height:  A pointer to an unsigned integer.  The width and height
%      as determined by the geometry specification is returned here.
% */ MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y, size_t *width,size_t *height) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return((int) ParseMetaGeometry(geometry,x,y,width,height)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a r s e S i z e G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ParseSizeGeometry() returns a region as defined by the geometry string with % respect to the image dimensions and aspect ratio. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,&region_info->x,&region_info->y, % &region_info->width,&region_info->height); % % The format of the ParseSizeGeometry method is: % % MagickStatusType ParseSizeGeometry(const Image *image, % const char *geometry,RectangeInfo *region_info) % % A description of each parameter follows: % % o geometry: The geometry (e.g. 100x100+10+10). % % o region_info: the region as defined by the geometry string. % */ MagickExport MagickStatusType ParseSizeGeometry(const Image *image, const char *geometry,RectangleInfo *region_info) { MagickStatusType flags; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7"); SetGeometry(image,region_info); flags=ParseMetaGeometry(geometry,&region_info->x,&region_info->y, &region_info->width,&region_info->height); return(flags); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImageList() removes the last image in the list. % % Deprecated, replace with: % % RemoveLastImageFromList(images); % % The format of the PopImageList method is: % % Image *PopImageList(Image **images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport Image *PopImageList(Image **images) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(RemoveLastImageFromList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImagePixels() transfers one or more pixel components from the image pixel % cache to a user supplied buffer. The pixels are returned in network byte % order. MagickTrue is returned if the pixels are successfully transferred, % otherwise MagickFalse. % % The format of the PopImagePixels method is: % % size_t PopImagePixels(Image *,const QuantumType quantum, % unsigned char *destination) % % A description of each parameter follows: % % o image: the image. % % o quantum: Declare which pixel components to transfer (RGB, RGBA, etc). % % o destination: The components are transferred to this buffer. % */ MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum, unsigned char *destination) { QuantumInfo *quantum_info; size_t length; quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image); if (quantum_info == (QuantumInfo *) NULL) return(0); length=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info, quantum,destination,&image->exception); quantum_info=DestroyQuantumInfo(quantum_info); return(length); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t s c r i p t G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PostscriptGeometry() replaces any page mneumonic with the equivalent size in % picas. % % Deprecated, replace with: % % GetPageGeometry(page); % % The format of the PostscriptGeometry method is: % % char *PostscriptGeometry(const char *page) % % A description of each parameter follows. 
% % o page: Specifies a pointer to an array of characters. % The string is either a Postscript page name (e.g. A4) or a postscript % page geometry (e.g. 612x792+36+36). % */ MagickExport char *PostscriptGeometry(const char *page) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return(GetPageGeometry(page)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImageList() adds an image to the end of the list. % % Deprecated, replace with: % % AppendImageToList(images,CloneImageList(image,exception)); % % The format of the PushImageList method is: % % unsigned int PushImageList(Image *images,const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int PushImageList(Image **images,const Image *image, ExceptionInfo *exception) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); AppendImageToList(images,CloneImageList(image,exception)); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImagePixels() transfers one or more pixel components from a user % supplied buffer into the image pixel cache of an image. The pixels are % expected in network byte order. It returns MagickTrue if the pixels are % successfully transferred, otherwise MagickFalse. % % The format of the PushImagePixels method is: % % size_t PushImagePixels(Image *image,const QuantumType quantum, % const unsigned char *source) % % A description of each parameter follows: % % o image: the image. 
%
%    o quantum: Declare which pixel components to transfer (red, green, blue,
%      opacity, RGB, or RGBA).
%
%    o source:  The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
  const unsigned char *source)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /* Returns 0 (no bytes imported) if the quantum info cannot be acquired. */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum,
    source,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z a t i o n E r r o r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizationError() measures the difference between the original and
%  quantized images.  This difference is the total quantization error.  The
%  error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
% % Deprecated, replace with: % % GetImageQuantizeError(image); % % The format of the QuantizationError method is: % % unsigned int QuantizationError(Image *image) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % */ MagickExport unsigned int QuantizationError(Image *image) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3"); return(GetImageQuantizeError(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % R a n d o m C h a n n e l T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomChannelThresholdImage() changes the value of individual pixels based % on the intensity of each pixel compared to a random threshold. The result % is a low-contrast, two color image. % % The format of the RandomChannelThresholdImage method is: % % unsigned int RandomChannelThresholdImage(Image *image, % const char *channel, const char *thresholds, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o thresholds: a geometry string containing LOWxHIGH thresholds. % If the string contains 2x2, 3x3, or 4x4, then an ordered % dither of order 2, 3, or 4 will be performed instead. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int RandomChannelThresholdImage(Image *image,const char *channel,const char *thresholds,ExceptionInfo *exception) { #define RandomChannelThresholdImageText " RandomChannelThreshold image... 
" double lower_threshold, upper_threshold; RandomInfo *random_info; ssize_t count, y; static MagickRealType o2[4]={0.2f, 0.6f, 0.8f, 0.4f}, o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f}, o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f, 0.6f, 1.2f, 0.4f, 0.9f, 1.3f, 0.2f}, threshold=128; size_t order; /* Threshold image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (thresholds == (const char *) NULL) return(MagickTrue); if (LocaleCompare(thresholds,"2x2") == 0) order=2; else if (LocaleCompare(thresholds,"3x3") == 0) order=3; else if (LocaleCompare(thresholds,"4x4") == 0) order=4; else { order=1; lower_threshold=0; upper_threshold=0; count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold, &upper_threshold); if (strchr(thresholds,'%') != (char *) NULL) { upper_threshold*=(.01*QuantumRange); lower_threshold*=(.01*QuantumRange); } if (count == 1) upper_threshold=(MagickRealType) QuantumRange-lower_threshold; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " RandomChannelThresholdImage: channel type=%s",channel); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold); if (LocaleCompare(channel,"all") == 0 || LocaleCompare(channel,"intensity") == 0) if (AcquireImageColormap(image,2) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); random_info=AcquireRandomInfo(); for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register IndexPacket index, *restrict indexes; register 
PixelPacket *restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (LocaleCompare(channel,"all") == 0 || LocaleCompare(channel,"intensity") == 0) { indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=GetPixelIntensity(image,q); if (order == 1) { if (intensity < lower_threshold) threshold=lower_threshold; else if (intensity > upper_threshold) threshold=upper_threshold; else threshold=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info)); } else if (order == 2) threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)]; else if (order == 3) threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)]; else if (order == 4) threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]; index=(IndexPacket) (intensity <= threshold ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } } if (LocaleCompare(channel,"opacity") == 0 || LocaleCompare(channel,"all") == 0 || LocaleCompare(channel,"matte") == 0) { if (image->matte != MagickFalse) for (x=0; x < (ssize_t) image->columns; x++) { if (order == 1) { if ((MagickRealType) q->opacity < lower_threshold) threshold=lower_threshold; else if ((MagickRealType) q->opacity > upper_threshold) threshold=upper_threshold; else threshold=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info)); } else if (order == 2) threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)]; else if (order == 3) threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)]; else if (order == 4) threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7; SetPixelOpacity(q,(MagickRealType) q->opacity <= threshold ? 
0 : QuantumRange); q++; } } else { /* To Do: red, green, blue, cyan, magenta, yellow, black */ if (LocaleCompare(channel,"intensity") != 0) ThrowBinaryException(OptionError,"UnrecognizedChannelType", image->filename); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } random_info=DestroyRandomInfo(random_info); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a c q u i r e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReacquireMemory() changes the size of the memory and returns a pointer to % the (possibly moved) block. The contents will be unchanged up to the % lesser of the new and old sizes. % % The format of the ReacquireMemory method is: % % void ReacquireMemory(void **memory,const size_t size) % % A description of each parameter follows: % % o memory: A pointer to a memory allocation. On return the pointer % may change but the contents of the original allocation will not. % % o size: the new size of the allocated memory. % */ MagickExport void ReacquireMemory(void **memory,const size_t size) { void *allocation; assert(memory != (void **) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (*memory == (void *) NULL) { *memory=AcquireMagickMemory(size); return; } allocation=realloc(*memory,size); if (allocation == (void *) NULL) *memory=RelinquishMagickMemory(*memory); *memory=allocation; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e c o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RecolorImage() apply color transformation to an image. The method permits % saturation changes, hue rotation, luminance to alpha, and various other % effects. 
Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the RecolorImage method is:
%
%      Image *RecolorImage(const Image *image,const size_t order,
%        const double *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o order: the number of columns and rows in the recolor matrix.
%
%    o color_matrix: An array of double representing the recolor matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const size_t order,
  const double *color_matrix,ExceptionInfo *exception)
{
  Image
    *recolor;

  KernelInfo
    *kernel;

  kernel=AcquireKernelInfo("1");
  if (kernel == (KernelInfo *) NULL)
    return((Image *) NULL);
  kernel->width=order;
  kernel->height=order;
  /* Borrow the caller's matrix; detach it again before the kernel is
     destroyed so the caller's memory is not freed. */
  kernel->values=(double *) color_matrix;
  recolor=ColorMatrixImage(image,kernel,exception);
  kernel->values=(double *) NULL;
  kernel=DestroyKernelInfo(kernel);
  return(recolor);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e d u c e N o i s e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceNoiseImage() smooths the contours of an image while still preserving
%  edge information.  The algorithm works by replacing each pixel with its
%  neighbor closest in value.  A neighbor is defined by radius.  Use a radius
%  of 0 and ReduceNoise() selects a suitable radius for you.
% % The format of the ReduceNoiseImage method is: % % Image *ReduceNoiseImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReduceNoiseImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *reduce_image; reduce_image=StatisticImage(image,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); return(reduce_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e A t t r i b u t e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageAttributeIterator() resets the image attributes iterator. Use it % in conjunction with GetNextImageAttribute() to iterate over all the values % associated with an image. % % Deprecated, replace with: % % ResetImagePropertyIterator(image); % % The format of the ResetImageAttributeIterator method is: % % ResetImageAttributeIterator(const ImageInfo *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageAttributeIterator(const Image *image) { ResetImagePropertyIterator(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as % defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. 
% % Deprecated, replace with: % % QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows, % GetCacheViewException(cache_view)); % % The format of the SetCacheViewPixels method is: % % PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows) { PixelPacket *pixels; pixels=QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows, GetCacheViewException(cache_view)); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t C a c h e T h e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetCacheThreshold() sets the amount of free memory allocated for the pixel % cache. Once this threshold is exceeded, all subsequent pixels cache % operations are to/from disk. % % The format of the SetCacheThreshold() method is: % % void SetCacheThreshold(const size_t threshold) % % A description of each parameter follows: % % o threshold: the number of megabytes of memory available to the pixel % cache. % */ MagickExport void SetCacheThreshold(const size_t size) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); (void) SetMagickResourceLimit(MemoryResource,size*1024*1024); (void) SetMagickResourceLimit(MapResource,2*size*1024*1024); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t E x c e p t i o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetExceptionInfo() sets the exception severity. 
% % The format of the SetExceptionInfo method is: % % MagickBooleanType SetExceptionInfo(ExceptionInfo *exception, % ExceptionType severity) % % A description of each parameter follows: % % o exception: the exception info. % % o severity: the exception severity. % */ MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception, ExceptionType severity) { assert(exception != (ExceptionInfo *) NULL); ClearMagickException(exception); exception->severity=severity; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImage() sets the red, green, and blue components of each pixel to % the image background color and the opacity component to the specified % level of transparency. The background color is defined by the % background_color member of the image. % % The format of the SetImage method is: % % void SetImage(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: Set each pixel to this level of transparency. % */ MagickExport void SetImage(Image *image,const Quantum opacity) { PixelPacket background_color; ssize_t y; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0"); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); background_color=image->background_color; if (opacity != OpaqueOpacity) background_color.opacity=opacity; if (background_color.opacity != OpaqueOpacity) { (void) SetImageStorageClass(image,DirectClass); image->matte=MagickTrue; } if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) { /* Set colormapped or CMYK image. 
*/ for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRGBO(q,&background_color); q++; } indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,0); if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } return; } /* Set DirectClass image. */ for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRGBO(q,&background_color); q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAttribute() searches the list of image attributes and replaces the % attribute value. If it is not found in the list, the attribute name % and value is added to the list. % % Deprecated, replace with: % % SetImageProperty(image,key,value); % % The format of the SetImageAttribute method is: % % MagickBooleanType SetImageAttribute(Image *image,const char *key, % const char *value) % % A description of each parameter follows: % % o image: the image. % % o key: the key. % % o value: the value. 
% */ MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key, const char *value) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); return(SetImageProperty(image,key,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageList() inserts an image into the list at the specified position. % % The format of the SetImageList method is: % % unsigned int SetImageList(Image *images,const Image *image, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o offset: the position within the list. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int SetImageList(Image **images,const Image *image, const ssize_t offset,ExceptionInfo *exception) { Image *clone; register ssize_t i; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); clone=CloneImageList(image,exception); while (GetPreviousImageInList(*images) != (Image *) NULL) (*images)=GetPreviousImageInList(*images); for (i=0; i < offset; i++) { if (GetNextImageInList(*images) == (Image *) NULL) return(MagickFalse); (*images)=GetNextImageInList(*images); } InsertImageInList(images,clone); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImagePixels() queues a mutable pixel region. % If the region is successfully initialized a pointer to a PixelPacket % array representing the region is returned, otherwise NULL is returned. 
% The returned pointer may point to a temporary working buffer for the % pixels or it may point to the final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This useful while the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % SetImagePixels() any way it pleases. SetImagePixels() does not initialize % the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in RAM, or in a % memory-mapped file. The returned pointer should *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain % the black color component or the colormap indexes (of type IndexPacket) % corresponding to the region. Once the PixelPacket (and/or IndexPacket) % array has been updated, the changes must be saved back to the underlying % image using SyncAuthenticPixels() or they may be lost. % % Deprecated, replace with: % % QueueAuthenticPixels(image,x,y,columns,rows,&image->exception); % % The format of the SetImagePixels() method is: % % PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows) % % A description of each parameter follows: % % o pixels: SetImagePixels returns a pointer to the pixels if they are % transferred, otherwise a NULL is returned. % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows) { return(QueueAuthenticPixels(image,x,y,columns,rows,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMagickRegistry() sets a blob into the registry and returns a unique ID. % If an error occurs, -1 is returned. % % The format of the SetMagickRegistry method is: % % ssize_t SetMagickRegistry(const RegistryType type,const void *blob, % const size_t length,ExceptionInfo *exception) % % A description of each parameter follows: % % o type: the registry type. % % o blob: the address of a Binary Large OBject. % % o length: For a registry type of ImageRegistryType use sizeof(Image) % otherise the blob length in number of bytes. % % o exception: return any errors or warnings in this structure. % */ MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob, const size_t magick_unused(length),ExceptionInfo *exception) { char key[MaxTextExtent]; MagickBooleanType status; static ssize_t id = 0; (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id); status=SetImageRegistry(type,key,blob,exception); if (status == MagickFalse) return(-1); return(id++); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M o n i t o r H a n d l e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMonitorHandler() sets the monitor handler to the specified method % and returns the previous monitor handler. 
% % The format of the SetMonitorHandler method is: % % MonitorHandler SetMonitorHandler(MonitorHandler handler) % % A description of each parameter follows: % % o handler: Specifies a pointer to a method to handle monitors. % */ MagickExport MonitorHandler GetMonitorHandler(void) { return(monitor_handler); } MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler) { MonitorHandler previous_handler; previous_handler=monitor_handler; monitor_handler=handler; return(previous_handler); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h i f t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShiftImageList() removes an image from the beginning of the list. % % Deprecated, replace with: % % RemoveFirstImageFromList(images); % % The format of the ShiftImageList method is: % % Image *ShiftImageList(Image **images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport Image *ShiftImageList(Image **images) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(RemoveFirstImageFromList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S i z e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SizeBlob() returns the current length of the image file or blob. % % Deprecated, replace with: % % GetBlobSize(image); % % The format of the SizeBlob method is: % % off_t SizeBlob(Image *image) % % A description of each parameter follows: % % o size: Method SizeBlob returns the current length of the image file % or blob. % % o image: the image. 
% */ MagickExport MagickOffsetType SizeBlob(Image *image) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3"); return((MagickOffsetType) GetBlobSize(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImageList() removes the images designated by offset and length from % the list and replaces them with the specified list. % % The format of the SpliceImageList method is: % % Image *SpliceImageList(Image *images,const ssize_t offset, % const size_t length,const Image *splices, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. % % o length: the length of the image list to remove. % % o splice: Replace the removed image list with this list. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImageList(Image *images,const ssize_t offset, const size_t length,const Image *splices,ExceptionInfo *exception) { Image *clone; register ssize_t i; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); clone=CloneImageList(splices,exception); while (GetPreviousImageInList(images) != (Image *) NULL) images=GetPreviousImageInList(images); for (i=0; i < offset; i++) { if (GetNextImageInList(images) == (Image *) NULL) return((Image *) NULL); images=GetNextImageInList(images); } (void) SpliceImageIntoList(&images,length,clone); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % s R G B C o m p a n d o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % sRGBCompandor() adds the gamma function to a sRGB pixel. 
% % The format of the sRGBCompandor method is: % % MagickRealType sRGBCompandor(const MagickRealType pixel) % % A description of each parameter follows: % % o pixel: the pixel. % */ MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel) { if (pixel <= (0.0031306684425005883*QuantumRange)) return(12.92*pixel); return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Strip() strips any whitespace or quotes from the beginning and end of a % string of characters. % % The format of the Strip method is: % % void Strip(char *message) % % A description of each parameter follows: % % o message: Specifies an array of characters. % */ MagickExport void Strip(char *message) { register char *p, *q; assert(message != (char *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (*message == '\0') return; if (strlen(message) == 1) return; p=message; while (isspace((int) ((unsigned char) *p)) != 0) p++; if ((*p == '\'') || (*p == '"')) p++; q=message+strlen(message)-1; while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p)) q--; if (q > p) if ((*q == '\'') || (*q == '"')) q--; (void) CopyMagickMemory(message,p,(size_t) (q-p+1)); message[q-p+1]='\0'; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c C a c h e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncCacheView() saves the cache view pixels to the in-memory or disk % cache. It returns MagickTrue if the pixel region is synced, otherwise % MagickFalse. 
% % Deprecated, replace with: % % SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view)); % % The format of the SyncCacheView method is: % % MagickBooleanType SyncCacheView(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view) { MagickBooleanType status; status=SyncCacheViewAuthenticPixels(cache_view, GetCacheViewException(cache_view)); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncCacheViewPixels() saves the cache view pixels to the in-memory % or disk cache. It returns MagickTrue if the pixel region is flushed, % otherwise MagickFalse. % % Deprecated, replace with: % % SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view)); % % The format of the SyncCacheViewPixels method is: % % MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) { MagickBooleanType status; status=SyncCacheViewAuthenticPixels(cache_view, GetCacheViewException(cache_view)); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is synced, otherwise % MagickFalse. 
% % Deprecated, replace with: % % SyncAuthenticPixels(image,&image->exception); % % The format of the SyncImagePixels() method is: % % MagickBooleanType SyncImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType SyncImagePixels(Image *image) { return(SyncAuthenticPixels(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e m p o r a r y F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TemporaryFilename() replaces the contents of path by a unique path name. % % The format of the TemporaryFilename method is: % % void TemporaryFilename(char *path) % % A description of each parameter follows. % % o path: Specifies a pointer to an array of characters. The unique path % name is returned in this array. % */ MagickExport void TemporaryFilename(char *path) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6"); (void) AcquireUniqueFilename(path); (void) RelinquishUniqueFileResource(path); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThresholdImage() changes the value of individual pixels based on % the intensity of each pixel compared to threshold. The result is a % high-contrast, two color image. % % The format of the ThresholdImage method is: % % unsigned int ThresholdImage(Image *image,const double threshold) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value % */ MagickExport unsigned int ThresholdImage(Image *image,const double threshold) { #define ThresholdImageTag "Threshold/Image" IndexPacket index; ssize_t y; /* Threshold image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (!AcquireImageColormap(image,2)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", "UnableToThresholdImage"); for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { index=(IndexPacket) (GetPixelIntensity(image,q) <= threshold ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (!SyncAuthenticPixels(image,&image->exception)) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h r e s h o l d I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThresholdImageChannel() changes the value of individual pixels based on % the intensity of each pixel channel. The result is a high-contrast image. % % The format of the ThresholdImageChannel method is: % % unsigned int ThresholdImageChannel(Image *image,const char *threshold) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold values. % */ MagickExport unsigned int ThresholdImageChannel(Image *image, const char *threshold) { #define ThresholdImageTag "Threshold/Image" MagickPixelPacket pixel; GeometryInfo geometry_info; IndexPacket index; ssize_t y; unsigned int flags; /* Threshold image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (threshold == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); flags=ParseGeometry(threshold,&geometry_info); pixel.red=geometry_info.rho; if (flags & SigmaValue) pixel.green=geometry_info.sigma; else pixel.green=pixel.red; if (flags & XiValue) pixel.blue=geometry_info.xi; else pixel.blue=pixel.red; if (flags & PsiValue) pixel.opacity=geometry_info.psi; else pixel.opacity=(MagickRealType) OpaqueOpacity; if (flags & PercentValue) { pixel.red*=QuantumRange/100.0f; pixel.green*=QuantumRange/100.0f; pixel.blue*=QuantumRange/100.0f; pixel.opacity*=QuantumRange/100.0f; } if (!(flags & SigmaValue)) { if (!AcquireImageColormap(image,2)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", "UnableToThresholdImage"); if (pixel.red == 0) (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception); } for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (IsMagickGray(&pixel) != MagickFalse) for (x=0; x < (ssize_t) image->columns; x++) { index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRed(q,image->colormap[(ssize_t) index].red); SetPixelGreen(q,image->colormap[(ssize_t) index].green); SetPixelBlue(q,image->colormap[(ssize_t) index].blue); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,(MagickRealType) q->red <= pixel.red ? 0 : QuantumRange); SetPixelGreen(q,(MagickRealType) q->green <= pixel.green ? 0 : QuantumRange); SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue ? 
0 : QuantumRange); SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity ? 0 : QuantumRange); q++; } if (!SyncAuthenticPixels(image,&image->exception)) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformColorspace() converts the image to a specified colorspace. % If the image is already in the requested colorspace, no work is performed. % Note that the current colorspace is stored in the image colorspace member. % The transformation matrices are not necessarily the standard ones: the % weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % Deprecated, replace with: % % TransformImageColorspace(image,colorspace); % % The format of the TransformColorspace method is: % % unsigned int (void) TransformColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image to transform % % o colorspace: the desired colorspace. % */ MagickExport unsigned int TransformColorspace(Image *image, const ColorspaceType colorspace) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6"); return(TransformImageColorspace(image,colorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m H S L % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformHSL() converts a (red, green, blue) to a (hue, saturation, % lightness) triple. 
% % The format of the TransformHSL method is: % % void TransformHSL(const Quantum red,const Quantum green, % const Quantum blue,double *hue,double *saturation,double *lightness) % % A description of each parameter follows: % % o red, green, blue: A Quantum value representing the red, green, and % blue component of a pixel.. % % o hue, saturation, lightness: A pointer to a double value representing a % component of the HSL color space. % */ static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } MagickExport void TransformHSL(const Quantum red,const Quantum green, const Quantum blue,double *hue,double *saturation,double *lightness) { MagickRealType b, delta, g, max, min, r; /* Convert RGB to HSL colorspace. */ assert(hue != (double *) NULL); assert(saturation != (double *) NULL); assert(lightness != (double *) NULL); r=QuantumScale*red; g=QuantumScale*green; b=QuantumScale*blue; max=MagickMax(r,MagickMax(g,b)); min=MagickMin(r,MagickMin(g,b)); *hue=0.0; *saturation=0.0; *lightness=(double) ((min+max)/2.0); delta=max-min; if (delta == 0.0) return; *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) : (2.0-max-min))); if (r == max) *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta); else if (g == max) *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta); else *hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta); *hue/=6.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s l a t e T e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TranslateText() replaces any embedded formatting characters with the % appropriate image attribute and returns the translated text. 
% % Deprecated, replace with: % % InterpretImageProperties(image_info,image,embed_text); % % The format of the TranslateText method is: % % char *TranslateText(const ImageInfo *image_info,Image *image, % const char *embed_text) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o embed_text: the address of a character string containing the embedded % formatting characters. % */ MagickExport char *TranslateText(const ImageInfo *image_info,Image *image, const char *embed_text) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6"); return(InterpretImageProperties(image_info,image,embed_text)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. % % The format of the TransparentImage method is: % % MagickBooleanType TransparentImage(Image *image, % const PixelPacket target,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o opacity: the replacement opacity value. 
% */ MagickExport MagickBooleanType TransparentImage(Image *image, const PixelPacket target,const Quantum opacity) { #define TransparentImageTag "Transparent/Image" MagickBooleanType proceed; ssize_t y; /* Make image color transparent. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0"); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) != MagickFalse) q->opacity=opacity; q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h i f t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnshiftImageList() adds the image to the beginning of the list. % % Deprecated, replace with: % % PrependImageToList(images,CloneImageList(image,exception)); % % The format of the UnshiftImageList method is: % % unsigned int UnshiftImageList(Image *images,const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport unsigned int UnshiftImageList(Image **images,const Image *image, ExceptionInfo *exception) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); PrependImageToList(images,CloneImageList(image,exception)); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + V a l i d a t e C o l o r m a p I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ValidateColormapIndex() validates the colormap index. If the index does % not range from 0 to the number of colors in the colormap an exception % issued and 0 is returned. % % Deprecated, replace with: % % ConstrainColormapIndex(image,index); % % The format of the ValidateColormapIndex method is: % % IndexPacket ValidateColormapIndex(Image *image,const unsigned int index) % % A description of each parameter follows: % % o index: Method ValidateColormapIndex returns colormap index if it is % valid other an exception issued and 0 is returned. % % o image: the image. % % o index: This integer is the colormap index. % */ MagickExport IndexPacket ValidateColormapIndex(Image *image, const size_t index) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4"); return(ConstrainColormapIndex(image,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Z o o m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZoomImage() creates a new image that is a scaled size of an existing one. % It allocates the memory necessary for the new Image structure and returns a % pointer to the new image. The Point filter gives fast pixel replication, % Triangle is equivalent to bi-linear interpolation, and Mitchel giver slower, % very high-quality results. See Graphic Gems III for details on this % algorithm. 
% % The filter member of the Image structure specifies which image filter to % use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp. % % The format of the ZoomImage method is: % % Image *ZoomImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: An integer that specifies the number of columns in the zoom % image. % % o rows: An integer that specifies the number of rows in the scaled % image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ZoomImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { Image *zoom_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur, exception); return(zoom_image); } #endif
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(256) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(256) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 2: with dist_schedule // **************************** // // Test: #iterations == #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,1) num_teams(512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position 
} } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,512) num_teams(512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); int ten = 10; int chunkSize = 512/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,1) num_teams(256) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,500) num_teams(256) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail 
= 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); ten = 10; chunkSize = 500/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(256) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,1) num_teams(256) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,123) num_teams(256) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); ten = 10; chunkSize = 123/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(256) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 
3: with ds attributes // **************************** // // Test: private // ZERO(A); ZERO(B); double p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute private(p,q) num_teams(256) for(int i = 0 ; i < N ; i++) { p = 2; q = 3; A[i] += p; B[i] += q; } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*2) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); fail = 1; } if (B[i] != TRIALS*3) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute firstprivate(p,q) num_teams(64) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != (2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: lastprivate // // requires array because scalar would be treated as implicit firstprivate by target int lastpriv[2] = {-1,-1}; #pragma omp target teams distribute lastprivate(lastpriv) num_teams(10) for(int i = 0 ; i < omp_get_num_teams() ; i++) { lastpriv[0] = omp_get_team_num(); } if(lastpriv[0] != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv[0], 
9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // *************************** // Series 4: with parallel for // *************************** // // Test: simple blocking loop // ZERO(A); ZERO(B); int nte = 32; int tl = 64; int blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(nte) thread_limit(tl) for(int j = 0 ; j < 256 ; j += blockSize) { #pragma omp parallel for for(int i = j ; i < j+blockSize; i++) { A[i] += B[i] + C[i]; } } } for(int i = 0 ; i < 256 ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: blocking loop where upper bound is not a multiple of tl*nte // ZERO(A); ZERO(B); nte = 32; tl = 64; blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute num_teams(nte) thread_limit(tl) for(int j = 0 ; j < 510 ; j += blockSize) { int ub = (j+blockSize < 510) ? 
(j+blockSize) : 512; #pragma omp parallel for for(int i = j ; i < ub; i++) { A[i] += B[i] + C[i]; } } } for(int i = 0 ; i < 256 ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // ************************** // Series 5: collapse // ************************** // // Test: 2 loops // double * S = (double*)malloc(N*N*sizeof(double)); double * T = (double*)malloc(N*N*sizeof(double)); double * U = (double*)malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute collapse(2) map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) num_teams(512) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // int M = N/8; double * V = (double*)malloc(M*M*M*sizeof(double)); double * Z = (double*)malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams distribute collapse(3) map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) num_teams(512) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) 
printf("Failed\n"); else printf("Succeeded\n"); return 0; }
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> #ifdef USE_CALI #include <caliper/cali.h> #endif #ifdef USE_LIKWID #include <likwid-marker.h> #endif /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. 
The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 60000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. 
* If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, 
OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #ifdef USE_CALI cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS); #pragma omp parallel { cali_set_int(thread_attr, omp_get_thread_num()); } #endif #ifdef USE_LIKWID LIKWID_MARKER_INIT; #endif #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. 
*/ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } #ifdef USE_LIKWID LIKWID_MARKER_CLOSE; #endif /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = 
avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } #ifndef abs #define abs(a) ((a) >= 0 ? 
(a) : -(a)) #endif void checkSTREAMresults () { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; 
#ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifndef BW_SCALE #define BW_SCALE 1 #endif #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j, _b; #pragma omp parallel { #ifdef USE_CALI CALI_MARK_BEGIN("copy"); #endif #ifdef USE_LIKWID LIKWID_MARKER_START("copy"); #endif #pragma omp for for (j=0; j<STREAM_ARRAY_SIZE; j++) for (_b = 0; _b < BW_SCALE; _b++) c[j] = a[j]; #ifdef USE_CALI CALI_MARK_END("copy"); #endif #ifdef USE_LIKWID LIKWID_MARKER_STOP("copy"); #endif } // parallel } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j, _b; #pragma omp parallel { #ifdef USE_CALI CALI_MARK_BEGIN("scale"); #endif #ifdef USE_LIKWID LIKWID_MARKER_START("scale"); #endif #pragma omp for for (j=0; j<STREAM_ARRAY_SIZE; j++) for (_b = 0; _b < BW_SCALE; _b++) b[j] = scalar*c[j]; #ifdef USE_CALI 
CALI_MARK_END("scale"); #endif #ifdef USE_LIKWID LIKWID_MARKER_STOP("scale"); #endif } // parallel } void tuned_STREAM_Add() { ssize_t j, _b; #pragma omp parallel { #ifdef USE_CALI CALI_MARK_BEGIN("add"); #endif #ifdef USE_LIKWID LIKWID_MARKER_START("add"); #endif #pragma omp for for (j=0; j<STREAM_ARRAY_SIZE; j++) for (_b = 0; _b < BW_SCALE; _b++) c[j] = a[j]+b[j]; #ifdef USE_CALI CALI_MARK_END("add"); #endif #ifdef USE_LIKWID LIKWID_MARKER_STOP("add"); #endif } // parallel } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j, _b; #pragma omp parallel { #ifdef USE_CALI CALI_MARK_BEGIN("triad"); #endif #ifdef USE_LIKWID LIKWID_MARKER_START("triad"); #endif #pragma omp for for (j=0; j<STREAM_ARRAY_SIZE; j++) for (_b = 0; _b < BW_SCALE; _b++) a[j] = b[j]+scalar*c[j]; #ifdef USE_CALI CALI_MARK_END("triad"); #endif #ifdef USE_LIKWID LIKWID_MARKER_STOP("triad"); #endif } // parallel } /* end of stubs for the "tuned" versions of the kernels */ #endif
HardTanh.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/HardTanh.c"
#else

/* Forward pass: clamp every element of `input` into [min_val, max_val].
 *
 * When `inplace` is true, `input` is clamped in place and `output` is made
 * a view of it; otherwise the clamped values are written into `output`
 * (resized to match).  Contiguous tensors with more than one dimension use
 * a flat OpenMP loop; all other layouts fall back to the generic
 * TH_TENSOR_APPLY iterators. */
void THNN_(HardTanh_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          accreal min_val_,
          accreal max_val_,
          bool inplace)
{
  real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
  real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);

  if (inplace)
    THTensor_(set)(output, input);
  else
    THTensor_(resizeAs)(output, input);

  /* Generic (strided) path for 1-d or non-contiguous tensors. */
  int use_generic = (input->nDimension == 1)
                 || !THTensor_(isContiguous)(input)
                 || !THTensor_(isContiguous)(output);

  if (use_generic)
  {
    if (inplace)
    {
      TH_TENSOR_APPLY(real, input,
        *input_data = (*input_data < min_val) ? min_val
                    : ((*input_data > max_val) ? max_val : *input_data););
    }
    else
    {
      TH_TENSOR_APPLY2(real, output, real, input,
        *output_data = (*input_data < min_val) ? min_val
                     : ((*input_data <= max_val) ? *input_data : max_val););
    }
  }
  else
  {
    /* Flat path: both tensors are contiguous, walk the raw buffers. */
    real* in_data  = THTensor_(data)(input);
    real* out_data = THTensor_(data)(output);
    ptrdiff_t idx;
    ptrdiff_t numel = THTensor_(nElement)(input);

    if (inplace)
    {
#pragma omp parallel for private(idx)
      for (idx = 0; idx < numel; idx++)
      {
        if (in_data[idx] < min_val)
          in_data[idx] = min_val;
        else if (in_data[idx] > max_val)
          in_data[idx] = max_val;
      }
    }
    else
    {
#pragma omp parallel for private(idx)
      for (idx = 0; idx < numel; idx++)
        out_data[idx] = (in_data[idx] < min_val) ? min_val
                      : ((in_data[idx] <= max_val) ? in_data[idx] : max_val);
    }
  }
}

/* Backward pass: the gradient passes through where the input was strictly
 * inside (min_val, max_val) and is zeroed where the clamp saturated
 * (input <= min_val or input >= max_val).
 *
 * When `inplace` is true, `gradOutput` itself is zeroed at saturated
 * positions and `gradInput` aliases it; otherwise `gradInput` is filled
 * from `gradOutput`. */
void THNN_(HardTanh_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          accreal min_val_,
          accreal max_val_,
          bool inplace)
{
  real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
  real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);

  THNN_CHECK_NELEMENT(input, gradOutput);

  if (inplace)
    THTensor_(set)(gradInput, gradOutput);
  else
    THTensor_(resizeAs)(gradInput, input);

  /* Generic (strided) path for 1-d or non-contiguous tensors. */
  int use_generic = (input->nDimension == 1)
                 || !THTensor_(isContiguous)(input)
                 || !THTensor_(isContiguous)(gradOutput)
                 || !THTensor_(isContiguous)(gradInput);

  if (use_generic)
  {
    if (inplace)
    {
      TH_TENSOR_APPLY2(real, gradOutput, real, input,
        if (*input_data <= min_val || *input_data >= max_val)
          *gradOutput_data = 0;);
    }
    else
    {
      TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
        *gradInput_data = (*input_data <= min_val || *input_data >= max_val)
                        ? 0 : *gradOutput_data;);
    }
  }
  else
  {
    /* Flat path: all tensors are contiguous, walk the raw buffers. */
    real* gout_data = THTensor_(data)(gradOutput);
    real* gin_data  = THTensor_(data)(gradInput);
    real* in_data   = THTensor_(data)(input);
    ptrdiff_t idx;
    ptrdiff_t numel = THTensor_(nElement)(input);

    if (inplace)
    {
#pragma omp parallel for private(idx)
      for (idx = 0; idx < numel; idx++)
      {
        if (in_data[idx] <= min_val || in_data[idx] >= max_val)
          gin_data[idx] = 0;
      }
    }
    else
    {
#pragma omp parallel for private(idx)
      for (idx = 0; idx < numel; idx++)
        gin_data[idx] = (in_data[idx] <= min_val || in_data[idx] >= max_val)
                      ? 0 : gout_data[idx];
    }
  }
}

#endif
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class IfStmtBitfields { friend class IfStmt; unsigned : NumStmtBits; unsigned IsConstexpr : 1; }; class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; 
unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 17 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. 
unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class OpaqueValueExprBitfields { friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expressio if this /// bit is set to true. unsigned IsUnique : 1; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. 
unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; IfStmtBitfields IfStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; OpaqueValueExprBitfields OpaqueValueExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; CoawaitExprBitfields CoawaitBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). 
struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Stmt must stay pointer-sized: subclasses rely on the bitfield union
    // above occupying exactly one word.
    static_assert(sizeof(*this) == sizeof(void *),
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();
  const Stmt *IgnoreImplicit() const {
    return const_cast<Stmt *>(this)->IgnoreImplicit();
  }

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types).
/// Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc;

  /// True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro = false;

public:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass), SemiLoc(L),
        HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  SourceLocation LBraceLoc, RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt* const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {}

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension

public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) {}

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }

  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }

  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

inline SourceLocation SwitchCase::getLocEnd() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public Stmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
      : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
        NumAttrs(Attrs.size()) {
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt : public Stmt {
  enum { INIT, VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL,
         bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) {}

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { INIT, VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) {}

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                          : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) {}

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
/// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
/// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
      : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
        Target((Stmt*)target) {}

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// Build an empty return expression. explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) {} const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. 
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return {}; } SourceLocation getLocEnd() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation 
getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const 
LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. 
class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. 
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. 
using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
task_underIf.c
/* test pragma under the true body of if statement * two cases: inside {} or directly attached to true/false body * * Liao, 10/1/2008 * */ extern void process(int); extern void process2(int); int item[100]; int cutoff = 100; void foo(int i) { /*pragma needs scope fixes */ if (i%2==0) #pragma omp task if (i < cutoff) process (item[i]); else #pragma omp task process2(item[i]); /*pragma within explicit scopes */ if (i%2==0) { #pragma omp task process (item[i]); } else { #pragma omp task process2(item[i]); } }
gm_dfs_template.h
#ifndef GM_DFS_TEMPLATE_H #define GM_DFS_TEMPLATE_H #include <omp.h> #include <string.h> #include <set> #include <vector> #include "gm_graph.h" //----------------------------------------------- // template for DFS // Note that recursion-base DFS will surely crash due to // stack overflow, when applied to small-world graphs. // (It will visit O(N) nodes before ever pop up) // Thus, here we implement DFS withour recursion. //----------------------------------------------- struct _dfs_state { _dfs_state(node_t N, edge_t I, edge_t E) : node(N), idx(I), end(E) { } node_t node; // node edge_t idx; // edge idx edge_t end; // }; template<bool has_pre_visit, bool has_post_visit, bool has_navigator, bool use_reverse_edge> class gm_dfs_template { protected: virtual void visit_pre(node_t t)=0; virtual void visit_post(node_t t)=0; virtual bool check_navigator(node_t t, edge_t idx)=0; public: gm_dfs_template(gm_graph& _G) : G(_G) { visited_bitmap = NULL; // bitmap } virtual ~gm_dfs_template() { delete visited_bitmap; } void prepare(node_t root_node) { root = root_node; cnt = 0; visited_small.clear(); is_small = true; curr_node = INVALID_NODE; curr_idx = 0; curr_end = 0; THRESHOLD_LARGE = std::max((int)(G.num_nodes()*0.1), 4096); } void do_dfs() { enter_node(root); main_loop(); } private: void prepare_large() { delete[] visited_bitmap; visited_bitmap = new unsigned char[(G.num_nodes() + 7) / 8]; #pragma omp parallel for for (int i = 0; i < (G.num_nodes() + 7) / 8; i++) visited_bitmap[i] = 0; std::set<node_t>::iterator I; for (I = visited_small.begin(); I != visited_small.end(); I++) { node_t u = *I; _gm_set_bit(visited_bitmap, u); } is_small = false; stack.reserve(G.num_nodes()); } void enter_node(node_t n) { // push current node _dfs_state S(curr_node, curr_idx, curr_end); stack.push_back(S); curr_node = n; curr_idx = (use_reverse_edge) ? G.r_begin[n] : G.begin[n]; curr_end = (use_reverse_edge) ? 
G.r_begin[n + 1] : G.begin[n + 1]; // mark visited add_visited(n); cnt++; if (cnt == THRESHOLD_LARGE) // if go over threshold, it will probably visit all the nodes { prepare_large(); } if (has_pre_visit) visit_pre(n); } void exit_node(node_t n) { if (has_post_visit) visit_post(n); _dfs_state S = stack.back(); stack.pop_back(); curr_node = S.node; curr_idx = S.idx; curr_end = S.end; } void main_loop() { //---------------------------------- // Repeat until stack is empty //---------------------------------- while (curr_node != INVALID_NODE) { //---------------------------------- // Every neighbor has been visited //---------------------------------- if (curr_idx == curr_end) { exit_node(curr_node); continue; } else { //---------------------------------- // check every non-visited neighbor //---------------------------------- node_t z; if (use_reverse_edge) { z = G.r_node_idx[curr_idx]; } else { z = G.node_idx[curr_idx]; } if (has_visited(z)) { curr_idx++; continue; } if (has_navigator) { if (check_navigator(z, curr_idx) == false) { curr_idx++; continue; } } curr_idx++; enter_node(z); continue; } } } void add_visited(node_t n) { if (is_small) visited_small.insert(n); else _gm_set_bit(visited_bitmap, n); } bool has_visited(node_t n) { if (is_small) { return (visited_small.find(n) != visited_small.end()); } else { return _gm_get_bit(visited_bitmap, n); } } protected: node_t root; gm_graph& G; // stack implementation node_t stack_ptr; std::vector<_dfs_state> stack; node_t curr_node; edge_t curr_idx; edge_t curr_end; // visited set implementation node_t cnt; unsigned char* visited_bitmap; std::set<node_t> visited_small; bool is_small; int THRESHOLD_LARGE; static const node_t INVALID_NODE = -1; }; #endif
begin_declare_variant_elided_range_withouth_end.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s #pragma omp begin declare variant match(device={kind(gpu)}) // expected-note {{to match this '#pragma omp begin declare variant'}} int also_before(void) { return 0; } #pragma omp begin declare variant match(device={kind(gpu)}) int also_after(void) { return 2; } int also_before(void) { return 2; } #pragma omp end declare variant #pragma omp begin declare variant match(device={kind(fpga)}) This text is never parsed! #pragma omp end declare variant int also_after(void) { return 0; } int test() { return also_after() + also_before(); } // expected-error {{expected '#pragma omp end declare variant'}}
VectorMatrix.h
// Copyright 2015 Christina Teflioudi // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * VectorMatrix.h * * Created on: Oct 10, 2013 * Author: chteflio */ #ifndef VECTORMATRIX_H_ #define VECTORMATRIX_H_ #include <boost/numeric/ublas/matrix_proxy.hpp> #include <fstream> #include <iostream> #include <cmath> #include <util/exception.h> #include <util/io.h> #include <string> #include <ostream> #include <iomanip> #include <boost/unordered_map.hpp> #include <boost/algorithm/string/predicate.hpp> #ifdef WITH_SIMD #include <pmmintrin.h> //SSE3 #endif using boost::unordered_map; namespace mips { inline void skipLineFromFile(std::ifstream & file) { char c = 0; while (c != '\n' && !file.eof() && file.good()) { file >> std::noskipws >> c; } file >> std::skipws; } inline void computeDefaultBlockOffsets(row_type size, row_type blocks, std::vector<row_type>& blockOffsets, row_type start = 0) { blockOffsets.resize(blocks); row_type minSize = size / blocks; row_type remainder = size % blocks; for (row_type i = 0; i < blocks; ++i) { if (i == 0) { blockOffsets[i] = start; } else { blockOffsets[i] = minSize + blockOffsets[i - 1]; if (remainder > 0) { ++blockOffsets[i]; --remainder; } } } }; inline void scaleAndCopy(double* v1, const double* v2, double scale, col_type colNum) { for (int j = 0; j < colNum; ++j) { v1[j] = v2[j] * scale; } } inline void copy(double* v1, const double* v2, col_type colNum) { for (int j = 0; j < colNum; ++j) { v1[j] = v2[j]; } // std::memcpy((void*) v1, 
(void*) v2, sizeof (double)*colNum); } inline double calculateLength(const double* vec, col_type colNum) { double len = 0; for (int j = 0; j < colNum; ++j) { len += vec[j] * vec[j]; } return sqrt(len); } class VectorMatrix { double* data; bool shuffled, normalized, extraMult; row_type offset; col_type lengthOffset; int sizeDiv2; // for simd instruction inline void zeroOutLastPadding() { for (row_type i = 0; i < rowNum; ++i) { data[(i + 1) * offset - 1] = 0; // zero-out the last padding } } // stuff that needs to be done in both read methods // rowNum and colNum need to be initialized before calling this method inline void readFromFileCommon() { if (pow(2, sizeof (col_type) * 8) - 1 < colNum) { std::cerr << "Your vectors have dimensionality " << colNum << " which is more than what lemp is compiled to store. Change the col_type in BasicStructs.h and recompile!" << std::endl; exit(1); } if (pow(2, sizeof (row_type) * 8) - 1 < rowNum) { std::cerr << "Your dataset has " << rowNum << " vectors which is more than what lemp is compiled to store. Change the row_type in BasicStructs.h and recompile!" << std::endl; exit(1); } initializeBasics(colNum, rowNum, false); if (colNum < NUM_LISTS) { std::cout << "[WARNING] Your vectors have dimensionality" << colNum << " and the tuner will try to search among " << NUM_LISTS << ". Perhaps you want to change the parameter NUM_LISTS in Definitions.h and recompile!" << std::endl; } if (LOWER_LIMIT_PER_BUCKET >= rowNum) { std::cout << "[WARNING] You have " << rowNum << " vectors and the tuner will try to take a sample of at least " << LOWER_LIMIT_PER_BUCKET << " vectors per probe bucket. Perhaps you want to change the parameter LOWER_LIMIT_PER_BUCKET in Definitions.h and recompile!" 
<< std::endl; } for (int i = 0; i < rowNum; i++) { setLengthInData(i, 1); } } inline void readFromFileCSV(const std::string& fileName, ta_size_type col, ta_size_type row) { std::ifstream file(fileName.c_str(), std::ios_base::in); if (!file.is_open()) { std::cout << "[ERROR] Fail to open file: " << fileName << std::endl; exit(1); } rowNum = row; colNum = col; std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl; VectorMatrix::readFromFileCommon(); std::string buffer; if (file) { for (ta_size_type i = 0; i < row; i++) { double* d = getMatrixRowPtr(i); for (ta_size_type j = 0; j < col; j++) { double f; file >> f; if (j != col - 1) { std::getline(file, buffer, ','); } d[j] = f; } std::getline(file, buffer); } } file.close(); } inline void readFromFileMMA(const std::string& fileName, bool left = true) { std::ifstream file(fileName.c_str(), std::ios_base::in); if (!file.is_open()) { std::cout << "[ERROR] Fail to open file: " << fileName << std::endl; exit(1); } while (file.peek() == '%') { skipLineFromFile(file); } ta_size_type col; // columns ta_size_type row; // rows file >> row >> col; rowNum = (left ? row : col); colNum = (left ? 
col : row); std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl; VectorMatrix::readFromFileCommon(); if (left) { if (file) { for (ta_size_type i = 0; i < col; i++) {// read one column for (ta_size_type j = 0; j < row; j++) { double f; file >> f; double* d = getMatrixRowPtr(j); d[i] = f; } } } file.close(); } else { if (file) { for (ta_size_type i = 0; i < col; i++) {// read one column for (ta_size_type j = 0; j < row; j++) { double f; file >> f; double* d = getMatrixRowPtr(i); d[j] = f; } } } file.close(); } } // const VectorMatrix& operator =(const VectorMatrix& m); public: std::vector<double> cweights; // forAP std::vector<double> maxVectorCoord; // forAP std::vector<row_type> vectorNNZ; // forAP std::vector<QueueElement> lengthInfo; // data: length id: vectorId std::vector<double> epsilonEquivalents; col_type colNum; row_type rowNum; friend void splitMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices); friend void initializeMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices, bool sort, bool ignoreLengths, double epsilon); inline VectorMatrix() : data(nullptr), shuffled(false), normalized(false), lengthOffset(1) {////////////////////// 1 is for padding } inline VectorMatrix(const std::vector<std::vector<double> > m) : data(nullptr), shuffled(false), normalized(false), lengthOffset(1){ initializeBasics(m[0].size(), m.size(), false); #pragma omp parallel for schedule(static, 1000) for (int i = 0; i < rowNum; ++i) { double* v1 = getMatrixRowPtr(i); const double * v2 = &m[i][0]; // std::memcpy((void*) v1, (void*) v2, sizeof (double)*colNum); copy(v1, v2, colNum); // for (int j = 0; j < colNum; ++j) { //// v1[j] = v2[j]; // std::cout<<v1[j]<<" "; // } // std::cout<<std::endl; } // std::cout<<"offset: "<<(int)offset<<" "<<(int)lengthOffset<<std::endl; } VectorMatrix& operator=(const VectorMatrix& r) { colNum 
= r.colNum; rowNum = r.rowNum; shuffled = r.shuffled; normalized = r.normalized; extraMult = r.extraMult; offset = r.offset; lengthOffset = r.lengthOffset; sizeDiv2 = r.sizeDiv2; lengthInfo.clear(); lengthInfo.reserve(r.lengthInfo.size()); std::copy(r.lengthInfo.begin(), r.lengthInfo.end(), back_inserter(lengthInfo)); cweights.clear(); cweights.reserve(r.cweights.size()); std::copy(r.cweights.begin(), r.cweights.end(), back_inserter(cweights)); maxVectorCoord.clear(); maxVectorCoord.reserve(r.maxVectorCoord.size()); std::copy(r.maxVectorCoord.begin(), r.maxVectorCoord.end(), back_inserter(maxVectorCoord)); vectorNNZ.clear(); vectorNNZ.reserve(r.vectorNNZ.size()); std::copy(r.vectorNNZ.begin(), r.vectorNNZ.end(), back_inserter(vectorNNZ)); epsilonEquivalents.clear(); epsilonEquivalents.reserve(r.epsilonEquivalents.size()); std::copy(r.epsilonEquivalents.begin(), r.epsilonEquivalents.end(), back_inserter(epsilonEquivalents)); int res = posix_memalign((void **) &(data), 16, sizeof (double)* offset * rowNum); if (res != 0) { std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" 
<< std::endl; exit(1); } std::memcpy((void*) data, (void*) r.data, sizeof (double)* offset * rowNum); } inline ~VectorMatrix() { if (data != nullptr) { free(data); data = nullptr; } } inline void fillInRandom(row_type rows, col_type cols) { initializeBasics(cols, rows, false); rg::Random32 rand(time(nullptr)); for (int i = 0; i < rowNum; ++i) { double * vec = getMatrixRowPtr(i); for (int j = 0; j < colNum; ++j) { vec[j] = rand.nextDouble(); } } } inline void initializeBasics(col_type numOfColumns, row_type numOfRows, bool norm) { colNum = numOfColumns; offset = colNum + 2; sizeDiv2 = colNum & (-2); extraMult = (sizeDiv2 < colNum); if (extraMult) offset++; rowNum = numOfRows; normalized = norm; lengthInfo.resize(rowNum); int res = posix_memalign((void **) &(data), 16, sizeof (double)* offset * rowNum); if (res != 0) { std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" << std::endl; exit(1); } if (extraMult) { zeroOutLastPadding(); } } inline void readFromFile(const std::string& fileName, int numCoordinates, int numVectors, bool left = true) { if (boost::algorithm::ends_with(fileName, ".csv")) { if (numCoordinates == 0 || numVectors == 0) { std::cerr << "When using csv files, you should provide the number of coordinates (--r) and the number of vectors (--m or --n)!" << std::endl; exit(1); } readFromFileCSV(fileName, numCoordinates, numVectors); } else if (boost::algorithm::ends_with(fileName, ".mma")) { readFromFileMMA(fileName, left); } else { std::cerr << "No valid input file format to read a VectorMatrix from!" 
<< std::endl; exit(1); } } inline void init(const VectorMatrix& matrix, bool sort, bool ignoreLength) { initializeBasics(matrix.colNum, matrix.rowNum, true); if (ignoreLength) { #pragma omp parallel for schedule(static, 1000) // get lengths for (int i = 0; i < rowNum; ++i) { const double* vec = matrix.getMatrixRowPtr(i); double len = calculateLength(vec, colNum); lengthInfo[i] = QueueElement(1, i); setLengthInData(i, 1); double x = 1 / len; double * d1 = getMatrixRowPtr(i); scaleAndCopy(d1, vec, x, colNum); } } else { #pragma omp parallel for schedule(static,1000) for (int i = 0; i < rowNum; ++i) { const double* vec = matrix.getMatrixRowPtr(i); double len = calculateLength(vec, colNum); lengthInfo[i] = QueueElement(len, i); } if (sort) { shuffled = true; std::sort(lengthInfo.begin(), lengthInfo.end(), std::greater<QueueElement>()); } #pragma omp parallel for schedule(static,1000) for (int i = 0; i < rowNum; ++i) { setLengthInData(i, lengthInfo[i].data); double x = 1 / lengthInfo[i].data; double * d1 = getMatrixRowPtr(i); double * d2 = matrix.getMatrixRowPtr(lengthInfo[i].id); scaleAndCopy(d1, d2, x, colNum); } } } inline void addVectors(const VectorMatrix& matrix, const std::vector<row_type>& dataIds) { initializeBasics(matrix.colNum, dataIds.size(), false); for (int i = 0; i < rowNum; ++i) { const double* vec = matrix.getMatrixRowPtr(dataIds[i]); lengthInfo[i] = QueueElement(1, dataIds[i]); double * d1 = getMatrixRowPtr(i); scaleAndCopy(d1, vec, 1, colNum); } } inline double* getMatrixRowPtr(row_type row) const {// the row starts from pos 1. 
Do ptr[-1] to get the length return &data[row * offset + 1 + lengthOffset]; } inline void print(row_type row) const { const double* vec = getMatrixRowPtr(row); for (int i = 0; i < colNum; ++i) { std::cout << i << ":" << vec[i] << " "; } std::cout << std::endl; std::cout << "Length: " << vec[-1] << " or " << lengthInfo[row].data << std::endl; std::cout << "hasId: " << lengthInfo[row].id << std::endl; } inline double getVectorLength(row_type row) const { return data[row * offset + lengthOffset]; } inline double setLengthInData(row_type row, double len) { return data[row * offset + lengthOffset] = len; } inline row_type getId(row_type row) const { return (normalized ? lengthInfo[row].id : row); } inline double cosine(row_type row, const double* query) const { const double* d_ptr = getMatrixRowPtr(row); double cosine = 0; #ifdef WITH_SIMD __m128d sum = _mm_set1_pd(0.0); int size = colNum + extraMult; for (int i = 0; i < size; i += 2) { sum = _mm_add_pd(sum, _mm_mul_pd(_mm_load_pd(d_ptr + i), _mm_load_pd(query + i))); } cosine = _mm_cvtsd_f64(_mm_hadd_pd(sum, sum)); return cosine; #else for (int i = 0; i < colNum; ++i) { cosine += query[i] * d_ptr[i]; } return cosine; #endif } inline double L2Distance(row_type row, const double* query)const { const double* d_ptr = getMatrixRowPtr(row); double dist = 0; if (normalized) { for (int i = 0; i < colNum; ++i) { double value = query[i] * query[-1] - d_ptr[i] * d_ptr[-1]; // unnormalize dist += value * value; } } else { for (int i = 0; i < colNum; ++i) { dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]); } } return sqrt(dist); } inline double L2Distance2(row_type row, const double* query)const { // I assume non normalized case as needed in PCA trees const double* d_ptr = getMatrixRowPtr(row); double dist = 0; for (int i = 0; i < colNum; ++i) { dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]); } return dist; } inline double innerProduct(row_type row, const double* query) const { const double ip = query[-1] * 
getVectorLength(row) * cosine(row, query); return ip; } inline std::pair<bool, double> passesThreshold(row_type row, const double* query, double theta) const { std::pair<bool, double> p; double ip = 1; if (normalized) { ip = query[-1] * getVectorLength(row); if (ip < theta) { p.first = false; return p; } } ip *= cosine(row, query); p.second = ip; if (ip < theta) { p.first = false; return p; } else { p.first = true; return p; } } }; // ignores the lengths inline void splitMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices) { row_type threads = matrices.size(); if (threads == 1) { matrices[0].initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, false); for (int i = 0; i < matrices[0].rowNum; ++i) { const double* vec = originalMatrix.getMatrixRowPtr(i); matrices[0].lengthInfo[i] = QueueElement(1, i); matrices[0].setLengthInData(i, 1); double * d1 = matrices[0].getMatrixRowPtr(i); scaleAndCopy(d1, vec, 1, originalMatrix.colNum); } } else { omp_set_num_threads(threads); std::vector<row_type> permuteVector(originalMatrix.rowNum); std::iota(permuteVector.begin(), permuteVector.end(), 0); rg::Random32 random(123); rg::shuffle(permuteVector.begin(), permuteVector.end(), random); std::vector<row_type> blockOffsets; computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets); #pragma omp parallel { row_type tid = omp_get_thread_num(); row_type start = blockOffsets[tid]; row_type end = (tid == blockOffsets.size() - 1 ? 
originalMatrix.rowNum : blockOffsets[tid + 1]); matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true); for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double* vec = originalMatrix.getMatrixRowPtr(ind); matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start); matrices[tid].setLengthInData(i - start, 1); double * d1 = matrices[tid].getMatrixRowPtr(i - start); scaleAndCopy(d1, vec, 1, originalMatrix.colNum); matrices[tid].lengthInfo[i - start].id = ind; // the original id } } } } /* map: id: original matrix id, first: thread second: posInMatrix */ inline void initializeMatrices(const VectorMatrix& originalMatrix, std::vector<VectorMatrix>& matrices, bool sort, bool ignoreLengths, double epsilon = 0) { row_type threads = matrices.size(); if (threads == 1) { matrices[0].initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, true); if (ignoreLengths) { #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[0].epsilonEquivalents.resize(matrices[0].rowNum, epsilon); #endif for (int i = 0; i < matrices[0].rowNum; ++i) { const double* vec = originalMatrix.getMatrixRowPtr(i); double len = calculateLength(vec, matrices[0].colNum); matrices[0].lengthInfo[i] = QueueElement(1, i); matrices[0].setLengthInData(i, 1); double x = 1 / len; double * d1 = matrices[0].getMatrixRowPtr(i); scaleAndCopy(d1, vec, x, originalMatrix.colNum); #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[0].epsilonEquivalents[i] *= x; #endif } } else { for (int i = 0; i < matrices[0].rowNum; ++i) { const double* vec = originalMatrix.getMatrixRowPtr(i); double len = calculateLength(vec, matrices[0].colNum); matrices[0].lengthInfo[i] = QueueElement(len, i); } if (sort) { matrices[0].shuffled = true; std::sort(matrices[0].lengthInfo.begin(), matrices[0].lengthInfo.end(), std::greater<QueueElement>()); } for (int i = 0; i < matrices[0].rowNum; ++i) { matrices[0].setLengthInData(i, matrices[0].lengthInfo[i].data); double x = 1 / 
matrices[0].lengthInfo[i].data; double * d1 = matrices[0].getMatrixRowPtr(i); double * d2 = originalMatrix.getMatrixRowPtr(matrices[0].lengthInfo[i].id); scaleAndCopy(d1, d2, x, originalMatrix.colNum); } } } else { // multiple threads omp_set_num_threads(threads); std::vector<row_type> permuteVector(originalMatrix.rowNum); std::iota(permuteVector.begin(), permuteVector.end(), 0); rg::Random32 random(123); rg::shuffle(permuteVector.begin(), permuteVector.end(), random); std::vector<row_type> blockOffsets; computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets); #pragma omp parallel { row_type tid = omp_get_thread_num(); row_type start = blockOffsets[tid]; row_type end = (tid == blockOffsets.size() - 1 ? originalMatrix.rowNum : blockOffsets[tid + 1]); matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true); if (ignoreLengths) { #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[tid].epsilonEquivalents.resize(matrices[tid].rowNum, epsilon); #endif for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double* vec = originalMatrix.getMatrixRowPtr(ind); double len = calculateLength(vec, matrices[tid].colNum); matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start); matrices[tid].setLengthInData(i - start, 1); double x = 1 / len; double * d1 = matrices[tid].getMatrixRowPtr(i - start); scaleAndCopy(d1, vec, x, originalMatrix.colNum); matrices[tid].lengthInfo[i - start].id = ind; // the original id #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[tid].epsilonEquivalents[i] *= x; #endif } } else { for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double* vec = originalMatrix.getMatrixRowPtr(ind); double len = calculateLength(vec, matrices[tid].colNum); matrices[tid].lengthInfo[i - start] = QueueElement(len, i - start); } if (sort) { matrices[tid].shuffled = true; std::sort(matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), std::greater<QueueElement>()); } 
for (int i = 0; i < matrices[tid].rowNum; ++i) { matrices[tid].setLengthInData(i, matrices[tid].lengthInfo[i].data); double x = 1 / matrices[tid].lengthInfo[i].data; row_type ind = permuteVector[matrices[tid].lengthInfo[i].id + start]; double * d1 = matrices[tid].getMatrixRowPtr(i); double * d2 = originalMatrix.getMatrixRowPtr(ind); scaleAndCopy(d1, d2, x, originalMatrix.colNum); matrices[tid].lengthInfo[i].id = ind; // the original id } } } } } void calculateAPneededForQuery(std::vector<VectorMatrix>& matrices, double thres, int k, std::vector<double>& global_cweights) { global_cweights.resize(matrices[0].colNum, 0); #pragma omp parallel { row_type tid = omp_get_thread_num(); col_type colNum = matrices[tid].colNum; row_type rowNum = matrices[tid].rowNum; row_type endUser = rowNum; if (k == 0) { auto up = std::lower_bound(matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), QueueElement(thres, 0), std::greater<QueueElement>()); endUser = up - matrices[tid].lengthInfo.begin(); } matrices[tid].cweights.resize(colNum); matrices[tid].maxVectorCoord.resize(rowNum); matrices[tid].vectorNNZ.resize(rowNum, 0); for (int i = 0; i < endUser; ++i) { double * d = matrices[tid].getMatrixRowPtr(i); for (int j = 0; j < colNum; ++j) { if (matrices[tid].cweights[j] < fabs(d[j])) matrices[tid].cweights[j] = fabs(d[j]); if (d[j] != 0) matrices[tid].vectorNNZ[i]++; if (fabs(d[j]) > matrices[tid].maxVectorCoord[i]) matrices[tid].maxVectorCoord[i] = fabs(d[j]); } } #pragma omp critical { for (int i = 0; i < colNum; ++i) { if (global_cweights[i] < matrices[tid].cweights[i]) global_cweights[i] = matrices[tid].cweights[i]; } } } } } #endif /* VECTORMATRIX_H_ */
pr34607.c
/* PR c++/34607 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -std=gnu99" } */

/* NOTE(review): intentionally invalid code for the GCC DejaGnu testsuite;
   each dg-error comment must stay on the same line as the construct that
   triggers it.  Do not "repair" the syntax errors.  */

void
foo ()
{
#pragma omp for
  for (int i =; i < 2; ++i)     /* { dg-error "expected expression before" } */
    ;
#pragma omp for
  for (T i = 54; i < 56; i++)   /* { dg-error "unknown type name" } */
    ;
  T j;                          /* { dg-error "unknown type name" } */
#pragma omp for
  for (j = 1; j < 3; j++)
    ;
}
parallel.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <cstdint>     // FIX: int64_t (was relying on a transitive include)
#include <functional>  // FIX: std::function (was relying on a transitive include)

#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#include "lite/backends/x86/mklml.h"
#endif

namespace paddle {
namespace lite {
namespace x86 {

// Set the MKL and OpenMP thread counts (clamped to >= 1).
// No-op when built without MKLML.
static void SetNumThreads(int num_threads) {
#ifdef PADDLE_WITH_MKLML
  // (std::max) is parenthesized to dodge the Windows max() macro.
  int real_num_threads = (std::max)(num_threads, 1);
#ifdef LITE_WITH_STATIC_MKL
  MKL_Set_Num_Threads(real_num_threads);
#else
  x86::MKL_Set_Num_Threads(real_num_threads);
#endif
  omp_set_num_threads(real_num_threads);
#endif
}

// Maximum usable thread count: 1 inside a parallel region (nested omp
// parallelism is not supported) or without MKLML; never less than 1.
static inline int64_t GetMaxThreads() {
  int64_t num_threads = 1;
#ifdef PADDLE_WITH_MKLML
  // Do not support nested omp parallelism.
  num_threads = omp_in_parallel() ? 1 : omp_get_max_threads();
#endif
  // FIX: clamp in int64_t directly; the old (std::max<int>)(num_threads, 1L)
  // narrowed the int64_t through int before widening it back.
  return (std::max)(num_threads, static_cast<int64_t>(1));
}

using ThreadHandler =
    std::function<void(const int64_t begin, const int64_t end)>;

// Split [begin, end) into contiguous chunks and run f on each, one chunk per
// OpenMP thread; falls back to a single serial call without MKLML or when
// only one thread is available. f receives a half-open [begin, end) range.
static inline void RunParallelFor(const int64_t begin,
                                  const int64_t end,
                                  const ThreadHandler& f) {
  if (begin >= end) {
    return;
  }

#ifdef PADDLE_WITH_MKLML
  int64_t num_threads = (std::min)(GetMaxThreads(), end - begin);
  if (num_threads > 1) {
#pragma omp parallel num_threads(num_threads)
    {
      int64_t tid = omp_get_thread_num();
      int64_t chunk_size = (end - begin + num_threads - 1) / num_threads;
      int64_t begin_tid = begin + tid * chunk_size;
      f(begin_tid, (std::min)(end, chunk_size + begin_tid));
    }
    return;
  }
#endif

  f(begin, end);
}

}  // namespace x86
}  // namespace lite
}  // namespace paddle
GB_binop__ne_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ne_fp64 // A.*B function (eWiseMult): GB_AemultB__ne_fp64 // A*D function (colscale): GB_AxD__ne_fp64 // D*A function (rowscale): GB_DxB__ne_fp64 // C+=B function (dense accum): GB_Cdense_accumB__ne_fp64 // C+=b function (dense accum): GB_Cdense_accumb__ne_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_fp64 // C=scalar+B GB_bind1st__ne_fp64 // C=scalar+B' GB_bind1st_tran__ne_fp64 // C=A+scalar GB_bind2nd__ne_fp64 // C=A'+scalar GB_bind2nd_tran__ne_fp64 // C type: bool // A type: double // B,b type: double // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx 
[pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FP64 || GxB_NO_NE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ne_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ne_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const 
int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ne_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ne_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ne_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B 
//------------------------------------------------------------------------------
// Generated kernels for the NE (z = x != y) operator on fp64 inputs.
// NOTE(review): this is machine-generated code; the loop bodies come from the
// #include'd template files, driven by GB_* macros defined earlier in this
// file (above this excerpt).  Do not edit by hand.
GrB_Info GB_AaddB__ne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the entire add computation is in the template, parameterized by macros
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ne_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ne_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (x != aij) ;          \
}

GrB_Info GB_bind1st_tran__ne_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (generated boilerplate)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (aij != y) ;          \
}

GrB_Info GB_bind2nd_tran__ne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the enclosing conditional opened above this excerpt
#endif
SectionsEndLink.c
// OpenMP "sections" structural test: each construct below exercises a
// different shape (empty sections, a single section, multiple sections,
// sections containing only declarations or bare expression statements).
// The trivial bodies (`12;`, `int x;`) are deliberate — the exact structure
// is what is being tested, so the code must not be "cleaned up".
int main()
{
  // empty sections construct
#pragma omp sections
  {
  }
  // one section holding only a local declaration
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
  }
  // two sections with bare expression statements
#pragma omp sections
  {
#pragma omp section
    {
      12;
    }
#pragma omp section
    {
      13;
    }
  }
  // a single explicit section
#pragma omp sections
  {
#pragma omp section
    {
      14;
    }
  }
  // mixed declarations and expression statements
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
#pragma omp section
    {
      15;
    }
#pragma omp section
    {
      int x;
    }
  }
  // two sections, each with its own (independent) local `x`
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
#pragma omp section
    {
      int x;
    }
  }
}
data_env_scalar_map.c
#include <stdio.h> #include <omp.h> int main(int argc, char *argv[], char **envp) { int numdev = omp_get_num_devices(); printf ("Machine has %d GPU device%s\n", numdev, (numdev==1 ? "" : "s") ); int from = 13; int tofrom = 17; printf("ON HOST before: from = %d, tofrom = %d\n", from, tofrom); #pragma omp target data map(from:from) map(tofrom:tofrom) #pragma omp target { printf("ON GPU: enter from = %d, tofrom = %d\n", from, tofrom); from = 5; tofrom = 5; printf("ON GPU: exit from = %d, tofrom = %d\n", from, tofrom); } // This should print ON HOST after: from = 5, tofrom = 5 printf("ON HOST after: from = %d, tofrom = %d\n", from, tofrom); return 0; }
MathTools.h
/**
 * \file
 * \copyright
 * Copyright (c) 2012-2020, OpenGeoSys Community (http://www.opengeosys.org)
 *            Distributed under a Modified BSD License.
 *              See accompanying file LICENSE.txt or
 *              http://www.opengeosys.org/project/license
 */

#pragma once

#include <cstddef>

#ifdef _OPENMP
#include <omp.h>
#endif

namespace MathLib
{
/**
 * standard inner product in R^N (N known at compile time)
 * \param v0 array of type T representing the vector
 * \param v1 array of type T representing the vector
 * \return the scalar product of v0 and v1
 *
 * NOTE(review): the OpenMP reduction here pays thread-spawn overhead even
 * for tiny N — presumably only profitable for large N; the <double,3>
 * specialization below exists to avoid it for the common 3-D case.
 */
template<typename T, int N> inline
T scalarProduct(T const * const v0, T const * const v1)
{
    // seed the reduction with the first component; the loop covers k = 1..N-1
    T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
    for (int k = 1; k < N; k++)
    {
        res += v0[k] * v1[k];
    }
    return res;
}

/// specialization for 3-component vectors: plain serial loop, no OpenMP
template <> inline
double scalarProduct<double,3>(double const * const v0, double const * const v1)
{
    double res (v0[0] * v1[0]);
    for (std::size_t k(1); k < 3; k++)
    {
        res += v0[k] * v1[k];
    }
    return res;
}

/**
 * standard inner product in R^n (n given at run time)
 * \param v0 array of type T representing the vector
 * \param v1 array of type T representing the vector
 * \param n  number of components in each vector
 * \return the scalar product of v0 and v1
 */
template <typename T>
inline T scalarProduct(T const* const v0, T const* const v1, int const n)
{
    T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
    for (int k = 1; k < n; k++)
    {
        res += v0[k] * v1[k];
    }
    return res;
}

/**
 * calcProjPntToLineAndDists computes the orthogonal projection
 * of a point p to the line described by the points a and b,
 * \f$g(\lambda) = a + \lambda (b - a)\f$,
 * the distance between p and the projected point
 * and the distances between the projected point and the end
 * points a, b of the line
 * \param p the (mesh) point
 * \param a first point of line
 * \param b second point of line
 * \param lambda the projected point described by the line equation above
 * \param d0 distance to the line point a
 * \returns the distance between p and the orthogonal projection of p
 */
double calcProjPntToLineAndDists(const double p[3], const double a[3],
        const double b[3], double &lambda, double &d0);

/** squared dist between double arrays p0 and p1 (size of arrays is 3) */
inline
double sqrDist(const double* p0, const double* p1)
{
    const double v[3] = {p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]};
    return scalarProduct<double,3>(v,v);
}

/**
 * Let \f$p_0, p_1, p_2 \in R^3\f$. The function getAngle
 * computes the angle between the edges \f$(p_0,p_1)\f$ and \f$(p_1,p_2)\f$
 * @param p0 start point of edge 0
 * @param p1 end point of edge 0 and start point of edge 1
 * @param p2 end point of edge 1
 * @return the angle between the edges
 */
double getAngle (const double p0[3], const double p1[3], const double p2[3]);

} // namespace MathLib
DRB019-plusplus-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Race condition on outLen due to unprotected writes.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data race pairs: we allow two pair to preserve the original code pattern.
  1. outLen@72:12 vs. outLen@72:12
  2. output[]@72:5 vs. output[]@72:5
*/

#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench benchmark: the data race below is INTENTIONAL (this is a
 * "-yes" case).  Do not add synchronization or privatize outLen — the racy
 * pattern is exactly what race-detection tools are expected to flag. */
int main(int argc, char* argv[])
{
  int i ;
  int inLen=1000 ;
  int outLen = 0;

  /* optional command-line override of the input length */
  if (argc>1)
    inLen= atoi(argv[1]);

  int input[inLen];
  int output[inLen];

  for (i=0; i<inLen; ++i)
    input[i]=i;

  /* RACE: outLen++ is an unprotected read-modify-write shared across
   * threads, and output[outLen] may be written concurrently by two
   * threads computing the same index. */
#pragma omp parallel for
  for (i=0; i<inLen; ++i)
  {
    output[outLen++] = input[i] ;
  }

  printf("output[0]=%d\n", output[0]);
  return 0;
}
6860.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
/*
 * 3-D heat-equation stencil (PolyBench heat-3d, tiled by CHILL).
 * Alternates two Jacobi sweeps per time step: B <- stencil(A), then
 * A <- stencil(B), over the interior [1, n-2]^3 of 200^3 arrays.
 * The middle loops are tiled (8 in the first dimension, 32 in the third)
 * and the outer tile loop of each sweep is parallelized with OpenMP.
 *
 * Parameters:
 *   tsteps - nominal time-step count; NOTE the generated code hard-codes
 *            1000 iterations, so this argument is ignored (kept for the
 *            original call signature).
 *   n      - working extent per dimension (must satisfy 3 <= n <= 200).
 *   A, B   - 200^3 state arrays; on return both hold the result of the
 *            final sweeps (A is the primary output).
 *
 * BUG FIX: both private() clauses originally listed t14, which is never
 * declared anywhere in this function — a hard compile error when built
 * with -fopenmp.  t14 has been removed from the clauses; the set of
 * privatized loop indices is otherwise unchanged.
 */
void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0])
{
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  (void) tsteps;  /* iteration count is hard-coded below by the generator */
  for (t2 = 1; t2 <= 1000; t2 += 1) {
    /* sweep 1: B <- stencil(A); tiles over t4 are independent */
    #pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 32)
          for (t10 = t8; t10 <= (t8 + 31 < n - 2 ? t8 + 31 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
    /* sweep 2: A <- stencil(B) */
    #pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 32)
          for (t10 = t8; t10 <= (t8 + 31 < n - 2 ? t8 + 31 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
  }
}
lastpass_fmt_plug.c
/* LastPass offline cracker patch for JtR. Hacked together during January of 2013 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * All the hard work was done by Milen (author of hashkill).
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

/* Scheme (from the visible code): key = PBKDF2-HMAC-SHA256(password, salt,
 * 500 iterations, 32 bytes); the stored "binary" is the first 16 bytes of
 * AES-256-ECB(key, "lastpass rocks\x02\x02"). */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_lastpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lastpass);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/aes.h>
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "lp"
#define FORMAT_NAME "LastPass offline"
#define FORMAT_TAG "$lp$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* self-test vectors: "$lp$<email-or-salt>$<32-hex-digit hash>" */
static struct fmt_tests lastpass_tests[] = {
	{"$lp$hackme@mailinator.com$6f5d8cec3615fc9ac7ba2e0569bce4f5", "strongpassword"},
	{"$lp$3$27c8641d7f5ab5985569d9d0b499b467", "123"},
	{"$lp$ninechars$d09153108a89347da5c97a4a18f91345", "PassWord"},
	{"$lp$anicocls$764b0f54528eb4a4c93aab1b18af28a5", ""},
	{NULL}
};

#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];      /* candidate passwords */
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; /* per-key results */

static struct custom_salt {
	int iterations;              /* NOTE(review): never set; 500 is hard-coded in crypt_all */
	int salt_length;
	unsigned char salt[32];      /* the email/salt field, not NUL-padded hex */
} *cur_salt;

/* allocate key/result buffers; scale batch size by thread count under OpenMP */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
			self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* syntactic check of a candidate ciphertext line */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* email */
		goto err;
	if (strlen(p) > 32)
		goto err;
	/* NOTE(review): delimiter here is "*" although the test vectors use "$"
	 * between salt and hash; strtokm(NULL, "*") then returns the whole
	 * remainder, which still passes the 32-hex-digit check below because
	 * the "$" was already consumed by the first strtokm — verify intended. */
	if ((p = strtokm(NULL, "*")) == NULL)	/* hash */
		goto err;
	if (hexlenl(p, &extra) != 32 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* extract the salt (the email field) from the ciphertext */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$lp$" */
	p = strtokm(ctcopy, "$");
	strncpy((char*)cs.salt, p, 32);
	cs.salt_length = strlen((char*)p);
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* decode the trailing 32-hex-digit hash into BINARY_SIZE raw bytes */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* derive keys for the whole batch and encrypt the fixed plaintext */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	/* NOTE(review): the `for` header is inside the #ifdef, so a non-OpenMP
	 * build compiles only the block body (a single index-0 iteration) —
	 * verify against upstream, which keeps the loop unconditional. */
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		AES_KEY akey;
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = key[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, 500, &(x.poutc), 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memset(&akey, 0, sizeof(AES_KEY));
			AES_set_encrypt_key((unsigned char*)key[i], 256, &akey);
			/* one ECB block of the fixed known plaintext */
			AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT);
		}
#else
		unsigned char key[32];
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->salt_length, 500, key, 32, 0);
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_encrypt_key((unsigned char*)key, 256, &akey);
		AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[index], &akey, AES_ENCRYPT);
#endif
	}
	return count;
}

/* quick scan: compares only the first machine word of each result */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* copy a candidate password, truncating to PLAINTEXT_LENGTH */
static void lastpass_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_lastpass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		lastpass_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		lastpass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
DRB097-target-teams-distribute-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

/* use of omp target + teams + distribute + parallel for */
/* NOTE(review): despite the filename/heading, this Cetus-translated version
 * contains only host-side `omp parallel for` loops — the target/teams
 * directives were presumably lowered away by the translator; verify against
 * the original DRB097 source if device offload behavior matters.
 * The #pragma cetus lines are translator annotations and must be kept as-is.
 */
int main(int argc, char * argv[])
{
	int i, i2;
	int len = 2560;
	double sum = 0.0, sum2 = 0.0;
	double a[len], b[len];
	/* Initialize with some values */
	int _ret_val_0;
	#pragma cetus private(i) 
	#pragma loop name main#0 
	#pragma cetus parallel 
	#pragma omp parallel for private(i)
	for (i=0; i<len; i ++ )
	{
		a[i]=(((double)i)/2.0);
		b[i]=(((double)i)/3.0);
	}
	/* blocked dot product: the inner `omp parallel for` is a nested
	 * parallel region (inactive unless nested parallelism is enabled) */
	#pragma cetus private(i, i2) 
	#pragma loop name main#1 
	#pragma cetus reduction(+: sum) 
	#pragma cetus parallel 
	#pragma omp parallel for private(i, i2) reduction(+: sum)
	for (i2=0; i2<len; i2+=256)
	{
		#pragma cetus private(i) 
		#pragma loop name main#1#0 
		#pragma cetus reduction(+: sum) 
		#pragma cetus parallel 
		#pragma omp parallel for private(i) reduction(+: sum)
		for (i=i2; i<(((i2+256)<len) ? (i2+256) : len); i ++ )
		{
			sum+=(a[i]*b[i]);
		}
	}
	/* CPU reference computation */
	#pragma cetus private(i) 
	#pragma loop name main#2 
	#pragma cetus reduction(+: sum2) 
	#pragma cetus parallel 
	#pragma omp parallel for private(i) reduction(+: sum2)
	for (i=0; i<len; i ++ )
	{
		sum2+=(a[i]*b[i]);
	}
	printf("sum=%lf sum2=%lf\n", sum, sum2);
	_ret_val_0=0;
	return _ret_val_0;
}
lis_precon_sainv.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifdef HAVE_CONFIG_H
	#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
	#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
	#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
	#include <omp.h>
#endif
#ifdef USE_MPI
	#include <mpi.h>
#endif
#include "lislib.h"

#undef __FUNC__
#define __FUNC__ "lis_precon_create_sainv"
/* Build the SAINV (stabilized approximate inverse) preconditioner.
 * The factorization itself requires CRS storage: matrices of any other
 * type are temporarily converted to CRS, factored, then the conversion
 * matrix is discarded. */
LIS_INT lis_precon_create_sainv(LIS_SOLVER solver, LIS_PRECON precon)
{
	LIS_INT err;
	LIS_MATRIX A,B;

	LIS_DEBUG_FUNC_IN;

	switch( solver->A->matrix_type )
	{
	case LIS_MATRIX_CRS:
		err = lis_precon_create_sainv_crs(solver,precon);
		break;
	default:
		/* convert to CRS, factor the copy, then restore solver->A */
		A = solver->A;
		err = lis_matrix_duplicate(A,&B);
		if( err ) return err;
		lis_matrix_set_type(B,LIS_MATRIX_CRS);
		err = lis_matrix_convert(A,B);
		if( err ) return err;
		solver->A = B;
		err = lis_precon_create_sainv_crs(solver,precon);
		lis_matrix_destroy(B);
		solver->A = A;
		break;
	}
#ifndef USE_QUAD_PRECISION
	err = lis_vector_duplicate(solver->A,&precon->temp);
#else
	if( solver->precision==LIS_PRECISION_DEFAULT )
	{
		err = lis_vector_duplicate(solver->A,&precon->temp);
	}
	else
	{
		err = lis_vector_duplicateex(LIS_PRECISION_QUAD,solver->A,&precon->temp);
	}
#endif
	if( err ) return err;

	precon->A = solver->A;
	precon->is_copy = LIS_FALSE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/********************************************
 W = I
 Z = I
 for i=1,...,n
   l = A * Z_i
   u = W_i^T * A
   D_ii = u * Z_i
   for j>i, l_j!=0
     W_j = W_j - drop( (l_j/D_ii)*W_i, tol )
   for j>i, u_j!=0
     Z_j = Z_j - drop( (u_j/D_ii)*Z_i, tol )
 ********************************************/
#if 1
#undef __FUNC__
#define __FUNC__ "lis_precon_create_sainv_crs"
/* Right-looking SAINV factorization (algorithm sketched above) on a CRS
 * matrix.  Produces W (left factor, precon->L), Z (right factor,
 * precon->U) and the reciprocal pivots in precon->D, dropping fill whose
 * magnitude relative to the row inf-norm is below the drop tolerance.
 * NOTE(review): early `return err;` paths after the first successful
 * allocations leave previously allocated buffers unfreed — pre-existing
 * OOM-path leak; verify against lis error-handling conventions. */
LIS_INT lis_precon_create_sainv_crs(LIS_SOLVER solver, LIS_PRECON precon)
{
	LIS_INT err;
	LIS_INT i,j,k,ii,jj,ik,jk;
	LIS_INT n,annz,cl,cu;
	LIS_INT *ww,*il,*iu;
	LIS_SCALAR t,dd,tol;
	LIS_SCALAR nrm;
	LIS_SCALAR *d,*l,*u;
	LIS_MATRIX A,B;
	LIS_MATRIX_ILU W,Z;
	LIS_VECTOR D;

	LIS_DEBUG_FUNC_IN;

	A = solver->A;
	n = A->n;
	tol = solver->params[LIS_PARAMS_DROP-LIS_OPTIONS_LEN];
	annz = A->n / 10;	/* initial per-row fill allocation / growth step */
	W = NULL; ww = NULL; d = NULL; l = NULL; u = NULL; il = NULL; iu = NULL;

	err = lis_matrix_ilu_create(n,1,&W);
	if( err ) return err;
	err = lis_matrix_ilu_create(n,1,&Z);
	if( err ) return err;
	err = lis_matrix_ilu_setCR(W);
	if( err ) return err;
	err = lis_matrix_ilu_setCR(Z);
	if( err ) return err;
	err = lis_vector_duplicate(A,&D);
	if( err ) return err;
	d = D->value;
	err = lis_matrix_ilu_premalloc(annz,W);
	if( err ) return err;
	err = lis_matrix_ilu_premalloc(annz,Z);
	if( err ) return err;

	/* dense work vectors l, u and index lists il, iu; ww is a scratch
	 * marker array (all zero between uses) */
	l = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::l");
	if( l==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}
	u = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::u");
	if( u==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}
	il = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::il");
	if( il==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	iu = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::iu");
	if( iu==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	ww = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::ww");
	if( ww==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* B = A in CCS form, for fast column access in the l = A*Z_i product */
	err = lis_matrix_duplicate(A,&B);
	if( err ) return err;
	err = lis_matrix_convert_crs2ccs(A,B);
	if( err )
	{
		return err;
	}

	for(i=0;i<n;i++) ww[i] = 0;
	/* W = I, Z = I */
	for(i=0;i<n;i++)
	{
		W->value[i][0] = 1.0;
		W->index[i][0] = i;
		W->nnz[i] = 1;
		Z->value[i][0] = 1.0;
		Z->index[i][0] = i;
		Z->nnz[i] = 1;
	}

	for(i=0; i<n; i++)
	{
		/* nrm_inf(A[i,:]) */
		nrm = 0.0;
		for(j=A->ptr[i];j<A->ptr[i+1];j++)
		{
			nrm = _max(nrm,fabs(A->value[j]));
		}
		nrm = 1.0/nrm;	/* inverted once; used as a relative drop scale */

		/* l = AZ_i (only entries below the pivot, jj > i) */
		cl = 0;
		memset(l,0,n*sizeof(LIS_SCALAR));
		for(k=0;k<Z->nnz[i];k++)
		{
			ii = Z->index[i][k];
			for(j=B->ptr[ii];j<B->ptr[ii+1];j++)
			{
				jj = B->index[j];
				if( jj>i )
				{
					l[jj] += B->value[j]*Z->value[i][k];
					if( ww[jj]==0 )
					{
						ww[jj] = 1;
						il[cl++] = jj;
					}
				}
			}
		}
		for(k=0;k<cl;k++) ww[il[k]] = 0;

		/* u = W_i'A */
		cu = 0;
		memset(u,0,n*sizeof(LIS_SCALAR));
		for(k=0;k<W->nnz[i];k++)
		{
			ii = W->index[i][k];
			for(j=A->ptr[ii];j<A->ptr[ii+1];j++)
			{
				jj = A->index[j];
				#ifdef USE_MPI
					if( jj>n-1 ) continue;	/* skip ghost columns */
				#endif
				u[jj] += A->value[j]*W->value[i][k];
				if( jj>i && ww[jj]==0 )
				{
					ww[jj] = 1;
					iu[cu++] = jj;
				}
			}
		}
		for(k=0;k<cu;k++) ww[iu[k]] = 0;

		/* d_ii = uZ_i or W_i'l (stored inverted) */
		t = 0.0;
		for(k=0;k<Z->nnz[i];k++)
		{
			t += u[Z->index[i][k]]*Z->value[i][k];
		}
		d[i] = 1.0/t;

		/* for j>i, l_j!=0 */
		/* w_j = w_j - (l_j/d_ii)*w_i, dropping small fill */
		for(jj=0;jj<cl;jj++)
		{
			j = il[jj];
			dd = l[j]*d[i];
			/* mark existing entries of row j (position+1) in ww */
			for(k=0;k<W->nnz[j];k++)
			{
				ww[W->index[j][k]] = k+1;
			}
			for(ik=0;ik<W->nnz[i];ik++)
			{
				jk = ww[W->index[i][ik]];
				if( jk!=0 )
				{
					/* update an existing entry */
					t = dd*W->value[i][ik];
					if( fabs(t)*nrm > tol )
					{
						W->value[j][jk-1] -= t;
					}
				}
				else
				{
					/* new fill entry; grow the row if needed */
					t = dd*W->value[i][ik];
					if( fabs(t)*nrm > tol )
					{
						if( W->nnz[j] == W->nnz_ma[j] )
						{
							W->nnz_ma[j] += annz;
							err = lis_matrix_ilu_realloc(j,W->nnz_ma[j],W);
							if( err ) return err;
						}
						jk = W->nnz[j];
						W->index[j][jk] = W->index[i][ik];
						W->value[j][jk] = -t;
						W->nnz[j]++;
					}
				}
			}
			for(k=0;k<W->nnz[j];k++)
			{
				ww[W->index[j][k]] = 0;
			}
		}

		/* for j>i, u_j!=0 */
		/* z_j = z_j - (u_j/d_ii)*z_i, dropping small fill */
		for(jj=0;jj<cu;jj++)
		{
			j = iu[jj];
			dd = u[j]*d[i];
			for(k=0;k<Z->nnz[j];k++)
			{
				ww[Z->index[j][k]] = k+1;
			}
			for(ik=0;ik<Z->nnz[i];ik++)
			{
				jk = ww[Z->index[i][ik]];
				if( jk!=0 )
				{
					t = dd*Z->value[i][ik];
					if( fabs(t)*nrm > tol )
					{
						Z->value[j][jk-1] -= t;
					}
				}
				else
				{
					t = dd*Z->value[i][ik];
					if( fabs(t)*nrm > tol )
					{
						if( Z->nnz[j] == Z->nnz_ma[j] )
						{
							Z->nnz_ma[j] += annz;
							err = lis_matrix_ilu_realloc(j,Z->nnz_ma[j],Z);
							if( err ) return err;
						}
						jk = Z->nnz[j];
						Z->index[j][jk] = Z->index[i][ik];
						Z->value[j][jk] = -t;
						Z->nnz[j]++;
					}
				}
			}
			for(k=0;k<Z->nnz[j];k++)
			{
				ww[Z->index[j][k]] = 0;
			}
		}
	}

	lis_matrix_destroy(B);
	lis_free2(5,l,u,ww,il,iu);
	precon->L = W;
	precon->U = Z;
	precon->D = D;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#else
/********************************************
 * SAINV construction (right-looking outer product form):
 *   for i = 1,...,n
 *     W_i = e_i ; Z_i = e_i
 *     r = e_i^T * A ; c = A * e_i
 *     for j = 1,...,i-1 : W_i = W_i - drop( (r*Z_j/D_jj)*W_j, tol )
 *     for j = 1,...,i-1 : Z_i = Z_i - drop( (W_j^T*c/D_jj)*Z_j, tol )
 *     l = A * Z_i
 *     D_ii = W_i^T * A * Z_i
 ********************************************/
#undef __FUNC__
#define __FUNC__ "lis_precon_create_sainv_crs"
/*
 * Build the SAINV (sparse approximate inverse) preconditioner for the
 * CRS-format matrix solver->A.  On success the factors are handed to the
 * preconditioner as precon->L = W, precon->U = Z, precon->D = D, where the
 * approximate inverse is M = Z * D^{-1} * W^T (D stores the RECIPROCAL
 * pivots: d[i] = 1/(W_i^T A Z_i), so the solve multiplies by d[i]).
 *
 * Parameters:
 *   solver : in  - supplies A and the drop tolerance / fill-rate params
 *   precon : out - receives the W, Z, D factors
 * Returns LIS_SUCCESS, LIS_OUT_OF_MEMORY, or an error from the lis_* helpers.
 *
 * NOTE(review): on an allocation failure the previously allocated work
 * arrays and W/Z/D are NOT freed before returning — the error paths leak.
 * NOTE(review): annz, lfil, len, cc, ic, pc and tmp are computed/allocated
 * but only used by the fill-limiting code that is commented out below.
 */
LIS_INT lis_precon_create_sainv_crs(LIS_SOLVER solver, LIS_PRECON precon)
{
    LIS_INT err;
    LIS_INT i,j,k,ii,jj,len,lfil;
    LIS_INT n,nnz,annz,cl,cu,cc,m;
    LIS_INT *wu,*wl,*il,*iu,*ic,*pc;
    LIS_SCALAR t,tol_dd,tol,v;
    LIS_SCALAR nrm;
    LIS_SCALAR *d,*r,*c,*l,*u,*tmp;
    LIS_MATRIX A,B;
    LIS_MATRIX_ILU W,Z;
    LIS_VECTOR D;

    LIS_DEBUG_FUNC_IN;

    A = solver->A;
    n = A->n;
    nnz = A->nnz;
    /* drop tolerance and fill rate from the solver's parameter table */
    tol = solver->params[LIS_PARAMS_DROP-LIS_OPTIONS_LEN];
    m = solver->params[LIS_PARAMS_RATE-LIS_OPTIONS_LEN];
    annz = 10+A->nnz / A->n;
    lfil = (LIS_INT)((double)A->nnz/(2.0*n))*m;
    W = NULL;
    Z = NULL;
    wu = NULL;
    wl = NULL;
    d = NULL;
    l = NULL;
    u = NULL;
    il = NULL;
    iu = NULL;

    /* W and Z are row-wise sparse factor containers; D holds the pivots */
    err = lis_matrix_ilu_create(n,1,&W);
    if( err ) return err;
    err = lis_matrix_ilu_create(n,1,&Z);
    if( err ) return err;
    err = lis_matrix_ilu_setCR(W);
    if( err ) return err;
    err = lis_matrix_ilu_setCR(Z);
    if( err ) return err;
    err = lis_vector_duplicate(A,&D);
    if( err ) return err;
    d = D->value;

    /* dense scratch vectors (r: current row of A, c: current column of A,
       l/u: accumulators for W_i/Z_i) and marker/index work arrays */
    tmp = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::l");
    if( tmp==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
    r = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::l");
    if( r==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
    c = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::u");
    if( c==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
    l = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::l");
    if( l==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
    u = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_sainv_crs::u");
    if( u==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
    il = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::il");
    if( il==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    iu = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::iu");
    if( iu==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    ic = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::iu");
    if( ic==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    wu = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::ww");
    if( wu==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    wl = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::ww");
    if( wl==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    pc = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_sainv_crs::iu");
    if( pc==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }

    /* B = A in CCS form, so columns of A can be scanned as cheaply as rows */
    lis_matrix_sort_crs(A);
    err = lis_matrix_duplicate(A,&B);
    if( err ) return err;
    err = lis_matrix_convert_crs2ccs(A,B);
    if( err ) return err;

    for(i=0;i<n;i++)
    {
        wu[i] = 0;
        wl[i] = 0;
        pc[i] = A->ptr[i];
    }

    for(i=0; i<n; i++)
    {
        /* nrm_inf(A[i,:]) : the drop tolerance is scaled per row */
        nrm = 0.0;
        for(j=A->ptr[i];j<A->ptr[i+1];j++)
        {
            nrm = _max(nrm,fabs(A->value[j]));
        }
        tol_dd = nrm * tol;

        /* l = e_i */
        /* u = e_i */
        l[i] = 1.0;
        u[i] = 1.0;
        il[0] = i;
        iu[0] = i;
        cl = 1;
        cu = 1;
        wu[i] = 1;
        wl[i] = 1;
        cc = 0;

        /* r = e_i^T*A : scatter row i of A into the dense work vector */
        for(j=A->ptr[i];j<A->ptr[i+1];j++)
        {
            jj = A->index[j];
            r[jj] = A->value[j];
        }
        /* c = A_i = A*e_i : scatter column i of A (from the CCS copy) */
        for(j=B->ptr[i];j<B->ptr[i+1];j++)
        {
            jj = B->index[j];
            c[jj] = B->value[j];
        }

        /* W_i = W_i - (r*Z_j/D_jj)*W_j
           (d[j] already holds 1/D_jj, hence the multiply) */
        for(j=0;j<i;j++)
        {
            t = 0.0;
            for(k=0;k<Z->nnz[j];k++)
            {
                t += r[Z->index[j][k]]*Z->value[j][k];
            }
            t = t * d[j];
            if( fabs(t) > tol_dd )
            {
                for(k=0;k<W->nnz[j];k++)
                {
                    v = t * W->value[j][k];
                    /* drop updates below the scaled tolerance */
                    if( fabs(v) > tol_dd )
                    {
                        jj = W->index[j][k];
                        if( wl[jj]==1 )
                        {
                            l[jj] -= v;
                        }
                        else
                        {
                            /* first touch of this entry: record its index */
                            l[jj] = -v;
                            il[cl++] = jj;
                            wl[jj] = 1;
                        }
                    }
                }
            }
        }

        /* Z_i = Z_i - (W_j^T*c/D_jj)*Z_j */
        for(j=0;j<i;j++)
        {
            t = 0.0;
            for(k=0;k<W->nnz[j];k++)
            {
                t += c[W->index[j][k]]*W->value[j][k];
            }
            t = t * d[j];
            if( fabs(t) > tol_dd )
            {
                for(k=0;k<Z->nnz[j];k++)
                {
                    v = t * Z->value[j][k];
                    if( fabs(v) > tol_dd )
                    {
                        jj = Z->index[j][k];
                        if( wu[jj]==1 )
                        {
                            u[jj] -= v;
                        }
                        else
                        {
                            u[jj] = -v;
                            iu[cu++] = jj;
                            wu[jj] = 1;
                        }
                    }
                }
            }
        }

        /* disabled fill-limiting variant (keep at most lfil entries):
        len = _min(lfil,cl);
        for(j=0;j<cl;j++) tmp[j] = fabs(l[il[j]]);
        lis_sort_di(0,cl-1,tmp,il);
        lis_sort_i(0,len-1,il);
        cl = len;
        */
        /* disabled post-dropping variant (discard entries <= tol_dd):
        k = cl;
        for(j=0;j<cl;j++)
        {
            if( fabs(l[il[j]])<= tol_dd )
            {
                wl[il[j]] = 0;
                il[j] = n;
                k--;
            }
        }
        lis_sort_i(0,cl-1,il);
        cl = k;
        k = cu;
        for(j=0;j<cu;j++)
        {
            if( fabs(u[iu[j]])<= tol_dd )
            {
                wu[iu[j]] = 0;
                iu[j] = n;
                k--;
            }
        }
        lis_sort_i(0,cu-1,iu);
        cu = k;
        */

        /* compress the accumulated W_i into the factor
           NOTE(review): these malloc results are not checked */
        W->nnz[i] = cl;
        if( cl > 0 )
        {
            W->index[i] = (LIS_INT *)malloc(cl*sizeof(LIS_INT));
            W->value[i] = (LIS_SCALAR *)malloc(cl*sizeof(LIS_SCALAR));
            memcpy(W->index[i],il,cl*sizeof(LIS_INT));
            for(j=0;j<cl;j++)
            {
                W->value[i][j] = l[il[j]];
            }
        }
        Z->nnz[i] = cu;
        if( cu > 0 )
        {
            Z->index[i] = (LIS_INT *)malloc(cu*sizeof(LIS_INT));
            Z->value[i] = (LIS_SCALAR *)malloc(cu*sizeof(LIS_SCALAR));
            memcpy(Z->index[i],iu,cu*sizeof(LIS_INT));
            for(j=0;j<cu;j++)
            {
                Z->value[i][j] = u[iu[j]];
            }
        }

        /* clear the scatter vectors and markers for the next iteration */
        for(j=A->ptr[i];j<A->ptr[i+1];j++) r[A->index[j]] = 0.0;
        for(j=B->ptr[i];j<B->ptr[i+1];j++) c[B->index[j]] = 0.0;
        for(j=0;j<cl;j++)
        {
            wl[il[j]] = 0;
            l[il[j]] = 0.0;
        }
        for(j=0;j<cu;j++)
        {
            wu[iu[j]] = 0;
        }

        /* D_ii = W_i^T * A * Z_i : first l = A*Z_i (sparse accumulation,
           reusing l/wl/il as scratch), then dot with W_i */
        cl = 0;
        for(k=0;k<Z->nnz[i];k++)
        {
            ii = Z->index[i][k];
            for(j=B->ptr[ii];j<B->ptr[ii+1];j++)
            {
                jj = B->index[j];
                if( wl[jj]==0 )
                {
                    l[jj] = B->value[j]*Z->value[i][k];
                    wl[jj] = 1;
                    il[cl++] = jj;
                }
                else
                {
                    l[jj] += B->value[j]*Z->value[i][k];
                }
            }
        }
        t = 0.0;
        for(j=0;j<W->nnz[i];j++)
        {
            k = W->index[i][j];
            t += W->value[i][j] * l[k];
        }
        /* store the reciprocal pivot; NOTE(review): t == 0 (breakdown)
           would divide by zero — no safeguard here */
        d[i] = 1.0 / t;
        for(j=0;j<cl;j++) wl[il[j]] = 0;
    }

    /* release work storage and hand the factors to the preconditioner */
    lis_matrix_destroy(B);
    lis_free2(11,r,c,il,l,wl,iu,u,wu,ic,pc,tmp);
    precon->L = W;
    precon->U = Z;
    precon->D = D;

    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
/* NOTE(review): this #endif closes a conditional opened before this chunk
   of the file — presumably a build-configuration guard; confirm upstream */
#endif

#undef __FUNC__
#define __FUNC__ "lis_psolve_sainv"
/*
 * Apply the SAINV preconditioner: X = M*B with M = Z D^{-1} W^T.
 * Steps: X = W^T*B (transposed ILU matvec), scale by the stored reciprocal
 * pivots d[i], then X = Z*t.  The quad-precision branch does the same with
 * double-double arithmetic via LIS_QUAD_MULD.
 */
LIS_INT lis_psolve_sainv(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
    LIS_INT i,n;
    LIS_MATRIX A;
    LIS_MATRIX_ILU W,Z;
    LIS_VECTOR t,d;
    LIS_PRECON precon;
    LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
    LIS_SCALAR *xl;
#endif

    /*
     *  x = Mb
     *  M  = ZD^{-1}W'
     */

    LIS_DEBUG_FUNC_IN;

    precon = solver->precon;
    A = precon->A;
    W = precon->L;
    Z = precon->U;
    d = precon->D;
    t = precon->temp;
    n = precon->L->n;
#ifdef USE_QUAD_PRECISION
    /* NOTE(review): xl is assigned but never used in this function */
    xl = X->value_lo;
#endif

#ifdef USE_QUAD_PRECISION
    if( B->precision==LIS_PRECISION_DEFAULT )
    {
#endif
        /* X = W^T * B */
        lis_matvect_ilu(A,W,B,X);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for(i=0;i<n;i++)
        {
            /* t = D^{-1} * X  (d holds reciprocal pivots) */
            t->value[i] = X->value[i]*d->value[i];
        }
        /* X = Z * t */
        lis_matvec_ilu(A,Z,t,X);
#ifdef USE_QUAD_PRECISION
    }
    else
    {
        lis_matvect_ilu(A,W,B,X);
#ifdef _OPENMP
#ifndef USE_SSE2
#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
#endif
        for(i=0;i<n;i++)
        {
#ifndef USE_SSE2
            LIS_QUAD_MULD(t->value[i],t->value_lo[i],X->value[i],X->value_lo[i],d->value[i]);
#else
            LIS_QUAD_MULD_SSE2(t->value[i],t->value_lo[i],X->value[i],X->value_lo[i],d->value[i]);
#endif
            /* t->value[i] = X->value[i]*d->value[i]; */
        }
        lis_matvec_ilu(A,Z,t,X);
    }
#endif

    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_psolvet_sainv"
/*
 * Apply the transposed SAINV preconditioner: X = M^T*B with
 * M^T = W D^{-1} Z^T.  Mirror image of lis_psolve_sainv with the roles of
 * W and Z exchanged.
 */
LIS_INT lis_psolvet_sainv(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
    LIS_INT i,n;
    LIS_MATRIX A;
    LIS_MATRIX_ILU W,Z;
    LIS_VECTOR t,d;
    LIS_PRECON precon;
    LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
    LIS_SCALAR *xl;
#endif

    /*
     *  x  = M'b
     *  M' = WD^{-1}Z'
     */

    LIS_DEBUG_FUNC_IN;

    precon = solver->precon;
    A = precon->A;
    W = precon->L;
    Z = precon->U;
    d = precon->D;
    t = precon->temp;
    n = precon->L->n;
#ifdef USE_QUAD_PRECISION
    /* NOTE(review): xl is assigned but never used in this function */
    xl = X->value_lo;
#endif

#ifdef USE_QUAD_PRECISION
    if( B->precision==LIS_PRECISION_DEFAULT )
    {
#endif
        /* X = Z^T * B */
        lis_matvect_ilu(A,Z,B,X);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for(i=0;i<n;i++)
        {
            t->value[i] = X->value[i]*d->value[i];
        }
        /* X = W * t */
        lis_matvec_ilu(A,W,t,X);
#ifdef USE_QUAD_PRECISION
    }
    else
    {
        lis_matvect_ilu(A,Z,B,X);
#ifdef _OPENMP
#ifndef USE_SSE2
#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
#endif
        for(i=0;i<n;i++)
        {
#ifndef USE_SSE2
            LIS_QUAD_MULD(t->value[i],t->value_lo[i],X->value[i],X->value_lo[i],d->value[i]);
#else
            LIS_QUAD_MULD_SSE2(t->value[i],t->value_lo[i],X->value[i],X->value_lo[i],d->value[i]);
#endif
            /* t->value[i] = X->value[i]*d->value[i]; */
        }
        lis_matvec_ilu(A,W,t,X);
    }
#endif

    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
/* ======================= GB_sparse_emult_template.c ======================= */
//------------------------------------------------------------------------------
// GB_sparse_emult_template: C=A.*B, C<M or !M>=A.*B when C is sparse/hyper
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// NOTE(review): this is a template fragment, #include'd by a wrapper that
// defines GB_PHASE_1_OF_2 (symbolic count) or GB_PHASE_2_OF_2 (numeric fill)
// and declares Cp, Ci, TaskList, vlen, the A/B/M matrix components, and the
// GB_* accessor macros; it is not a standalone translation unit.

// Computes C=A.*B, C<M>=A.*B, or C<!M>=A.*B when C is sparse or hypersparse:

//      ------------------------------------------
//      C       =           A       .*      B
//      ------------------------------------------
//      sparse  .           sparse          sparse
//      sparse  .           sparse          bitmap
//      sparse  .           sparse          full
//      sparse  .           bitmap          sparse
//      sparse  .           full            sparse

//      ------------------------------------------
//      C       <M>=        A       .*      B
//      ------------------------------------------
//      sparse  sparse      sparse          sparse
//      sparse  sparse      sparse          bitmap
//      sparse  sparse      sparse          full
//      sparse  sparse      bitmap          sparse
//      sparse  sparse      bitmap          bitmap
//      sparse  sparse      bitmap          full
//      sparse  sparse      full            sparse
//      sparse  sparse      full            bitmap

//      ------------------------------------------
//      C      <M> =        A       .*      B
//      ------------------------------------------
//      sparse  bitmap      sparse          sparse
//      sparse  bitmap      sparse          bitmap
//      sparse  bitmap      sparse          full
//      sparse  bitmap      bitmap          sparse
//      sparse  bitmap      full            sparse

//      ------------------------------------------
//      C      <M> =        A       .*      B
//      ------------------------------------------
//      sparse  full        sparse          sparse
//      sparse  full        sparse          bitmap
//      sparse  full        sparse          full
//      sparse  full        bitmap          sparse
//      sparse  full        full            sparse

//      ------------------------------------------
//      C     <!M> =        A       .*      B
//      ------------------------------------------
//      sparse  bitmap      sparse          sparse
//      sparse  bitmap      sparse          bitmap
//      sparse  bitmap      sparse          full
//      sparse  bitmap      bitmap          sparse
//      sparse  bitmap      full            sparse

//      ------------------------------------------
//      C     <!M> =        A       .*      B
//      ------------------------------------------
//      sparse  full        sparse          sparse
//      sparse  full        sparse          bitmap
//      sparse  full        sparse          full
//      sparse  full        bitmap          sparse
//      sparse  full        full            sparse

// For these cases: the mask is done later, and C=A.*B is computed here,
// without the mask (M is passed as NULL):

//      ------------------------------------------
//      C     <!M>=         A       .*      B
//      ------------------------------------------
//      sparse  sparse      sparse          sparse      (mask later)
//      sparse  sparse      sparse          bitmap      (mask later)
//      sparse  sparse      sparse          full        (mask later)
//      sparse  sparse      bitmap          sparse      (mask later)
//      sparse  sparse      full            sparse      (mask later)

// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C.  Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd.

// phase2: computes C, using the counts computed by phase1.

{

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j)
    // phase2: compute C
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < C_ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of C
            //------------------------------------------------------------------

            int64_t j = GBH (Ch, k) ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t cjnz = 0 ;
            #else
            int64_t pC, pC_end ;
            if (fine_task)
            {
                // A fine task computes a slice of C(:,j)
                pC     = TaskList [taskid  ].pC ;
                pC_end = TaskList [taskid+1].pC ;
                ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
            }
            else
            {
                // The vectors of C are never sliced for a coarse task.
                pC     = Cp [k] ;
                pC_end = Cp [k+1] ;
            }
            int64_t cjnz = pC_end - pC ;
            if (cjnz == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get A(:,j)
            //------------------------------------------------------------------

            int64_t pA = -1, pA_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ai,Ax [pA...pA_end-1], which is
                // a subset of the vector A(:,j)
                pA     = TaskList [taskid].pA ;
                pA_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector A (:,j)
                int64_t kA = (Ch == Ah) ? k :
                    ((C_to_A == NULL) ? j : C_to_A [k]) ;
                if (kA >= 0)
                {
                    pA     = GBP (Ap, kA, vlen) ;
                    pA_end = GBP (Ap, kA+1, vlen) ;
                }
            }

            int64_t ajnz = pA_end - pA ;        // nnz in A(:,j) for this slice
            int64_t pA_start = pA ;
            bool adense = (ajnz == len) ;

            // get the first and last indices in A(:,j) for this vector
            int64_t iA_first = -1 ;
            if (ajnz > 0)
            {
                iA_first = GBI (Ai, pA, vlen) ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iA_last = -1 ;
            if (ajnz > 0)
            {
                iA_last = GBI (Ai, pA_end-1, vlen) ;
            }
            #endif

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            int64_t pB = -1, pB_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Bi,Bx [pB...pB_end-1], which is
                // a subset of the vector B(:,j)
                pB     = TaskList [taskid].pB ;
                pB_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector B (:,j)
                int64_t kB = (Ch == Bh) ? k :
                    ((C_to_B == NULL) ? j : C_to_B [k]) ;
                if (kB >= 0)
                {
                    pB     = GBP (Bp, kB, vlen) ;
                    pB_end = GBP (Bp, kB+1, vlen) ;
                }
            }

            int64_t bjnz = pB_end - pB ;        // nnz in B(:,j) for this slice
            int64_t pB_start = pB ;
            bool bdense = (bjnz == len) ;

            // get the first and last indices in B(:,j) for this vector
            int64_t iB_first = -1 ;
            if (bjnz > 0)
            {
                iB_first = GBI (Bi, pB, vlen) ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iB_last = -1 ;
            if (bjnz > 0)
            {
                iB_last = GBI (Bi, pB_end-1, vlen) ;
            }
            #endif

            //------------------------------------------------------------------
            // get M(:,j) if M is sparse or hypersparse
            //------------------------------------------------------------------

            int64_t pM = -1 ;
            int64_t pM_end = -1 ;
            if (M_is_sparse_or_hyper)
            {
                if (fine_task)
                {
                    // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                    // a subset of the vector M(:,j)
                    pM     = TaskList [taskid].pM ;
                    pM_end = TaskList [taskid].pM_end ;
                }
                else
                {
                    int64_t kM = -1 ;
                    if (Ch == Mh)
                    {
                        // Ch is the same as Mh (a shallow copy), or both NULL
                        kM = k ;
                    }
                    else
                    {
                        kM = (C_to_M == NULL) ? j : C_to_M [k] ;
                    }
                    if (kM >= 0)
                    {
                        pM     = GBP (Mp, kM, vlen) ;
                        pM_end = GBP (Mp, kM+1, vlen) ;
                    }
                }
            }

            //------------------------------------------------------------------
            // C(:,j)<optional mask> = A (:,j) .* B (:,j) or subvector
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (ajnz == 0 || bjnz == 0)
            {

                //--------------------------------------------------------------
                // A(:,j) and/or B(:,j) are empty
                //--------------------------------------------------------------

                ;

            }
            else if (iA_last < iB_first || iB_last < iA_first)
            {

                //--------------------------------------------------------------
                // intersection of A(:,j) and B(:,j) is empty
                //--------------------------------------------------------------

                // the last entry of A(:,j) comes before the first entry
                // of B(:,j), or visa versa
                ;

            }
            else
            #endif

            if (M == NULL)
            {

                //--------------------------------------------------------------
                // M is not present, or !M is sparse but not applied here
                //--------------------------------------------------------------

                //      ------------------------------------------
                //      C       =           A       .*      B
                //      ------------------------------------------
                //      sparse  .           sparse          sparse
                //      sparse  .           sparse          bitmap
                //      sparse  .           sparse          full
                //      sparse  .           bitmap          sparse
                //      sparse  .           full            sparse

                //      ------------------------------------------
                //      C     <!M>=         A       .*      B
                //      ------------------------------------------
                //      sparse  sparse      sparse          sparse  (mask later)
                //      sparse  sparse      sparse          bitmap  (mask later)
                //      sparse  sparse      sparse          full    (mask later)
                //      sparse  sparse      bitmap          sparse  (mask later)
                //      sparse  sparse      full            sparse  (mask later)

                // A or B are sparse/hyper, or both
                ASSERT (A_is_sparse || A_is_hyper || B_is_sparse || B_is_hyper);

                if (A_is_bitmap)
                {

                    //----------------------------------------------------------
                    // Method01: A(:,j) is bitmap; B(:,j) is sparse/hyper
                    //----------------------------------------------------------

                    // TODO: B can be jumbled; then so is C
                    ASSERT (B_is_sparse || B_is_hyper) ;
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        int64_t pA = pA_start + i - iA_first ;
                        if (!Ab [pA]) continue ;
                        // C (i,j) = A (i,j) .* B (i,j)
                        #if defined ( GB_PHASE_1_OF_2 )
                        cjnz++ ;
                        #else
                        Ci [pC] = i ;
                        GB_GETA (aij, Ax, pA) ;
                        GB_GETB (bij, Bx, pB) ;
                        GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                        pC++ ;
                        #endif
                    }

                }
                else if (B_is_bitmap)
                {

                    //----------------------------------------------------------
                    // Method02: B(:,j) is bitmap; A(:,j) is sparse/hyper
                    //----------------------------------------------------------

                    // TODO: A can be jumbled; then so is C
                    ASSERT (A_is_sparse || A_is_hyper) ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i - iB_first ;
                        if (!Bb [pB]) continue ;
                        // C (i,j) = A (i,j) .* B (i,j)
                        #if defined ( GB_PHASE_1_OF_2 )
                        cjnz++ ;
                        #else
                        Ci [pC] = i ;
                        GB_GETA (aij, Ax, pA) ;
                        GB_GETB (bij, Bx, pB) ;
                        GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                        pC++ ;
                        #endif
                    }

                }
                else if (adense && bdense)
                {

                    //----------------------------------------------------------
                    // Method03: A(:,j) and B(:,j) dense: thus C(:,j) dense
                    //----------------------------------------------------------

                    // TODO: only do this if A and B are full, not just (:,j)
                    // Then no matrix will be jumbled.

                    ASSERT (ajnz == bjnz) ;
                    ASSERT (iA_first == iB_first) ;
                    ASSERT (iA_last  == iB_last ) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        int64_t i = p + iA_first ;
                        Ci [pC + p] = i ;
                        ASSERT (GBI (Ai, pA + p, vlen) == i) ;
                        ASSERT (GBI (Bi, pB + p, vlen) == i) ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
                    }
                    #endif

                }
                else if (adense)
                {

                    //----------------------------------------------------------
                    // Method04: A(:,j) dense, B(:,j) sparse: C(:,j) sparse
                    //----------------------------------------------------------

                    // TODO: only do this if A is full, not just A(:,j)
                    // TODO: B can be jumbled; then so is C

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = bjnz ;
                    #else
                    ASSERT (cjnz == bjnz) ;
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        int64_t i = Bi [pB + p] ;
                        Ci [pC + p] = i ;
                        GB_GETA (aij, Ax, pA + i - iA_first) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
                    }
                    #endif

                }
                else if (bdense)
                {

                    //----------------------------------------------------------
                    // Method05: A(:,j) sparse, B(:,j) dense: C(:,j) sparse
                    //----------------------------------------------------------

                    // TODO: only do this if B is full, not just B(:,j)
                    // TODO: A can be jumbled; then so is C

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        int64_t i = Ai [pA + p] ;
                        Ci [pC + p] = i ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + i - iB_first) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
                    }
                    #endif

                }
                else if (ajnz > 32 * bjnz)
                {

                    //----------------------------------------------------------
                    // Method06: A(:,j) is much denser than B(:,j)
                    //----------------------------------------------------------

                    // A and B cannot be jumbled
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        // find i in A(:,j)
                        int64_t pright = pA_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
                        if (found)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else if (bjnz > 32 * ajnz)
                {

                    //----------------------------------------------------------
                    // Method07: B(:,j) is much denser than A(:,j)
                    //----------------------------------------------------------

                    // A and B cannot be jumbled
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        // find i in B(:,j)
                        int64_t pright = pB_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
                        if (found)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // Method08: A(:,j) and B(:,j) about the sparsity
                    //----------------------------------------------------------

                    // linear-time scan of A(:,j) and B(:,j)
                    // A and B cannot be jumbled

                    while (pA < pA_end && pB < pB_end)
                    {
                        int64_t iA = Ai [pA] ;
                        int64_t iB = Bi [pB] ;
                        if (iA < iB)
                        {
                            // A(i,j) exists but not B(i,j)
                            pA++ ;
                        }
                        else if (iB < iA)
                        {
                            // B(i,j) exists but not A(i,j)
                            pB++ ;
                        }
                        else
                        {
                            // both A(i,j) and B(i,j) exist
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = iB ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
                            pC++ ;
                            #endif
                            pA++ ;
                            pB++ ;
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                }

            }
            else if (M_is_sparse_or_hyper)
            {

                //--------------------------------------------------------------
                // Method09: C and M are sparse or hypersparse
                //--------------------------------------------------------------

                //      ------------------------------------------
                //      C       <M>=        A       .*      B
                //      ------------------------------------------
                //      sparse  sparse      sparse          sparse
                //      sparse  sparse      sparse          bitmap
                //      sparse  sparse      sparse          full
                //      sparse  sparse      bitmap          sparse
                //      sparse  sparse      bitmap          bitmap
                //      sparse  sparse      bitmap          full
                //      sparse  sparse      full            sparse
                //      sparse  sparse      full            bitmap

                for ( ; pM < pM_end ; pM++)
                {

                    // M can be jumbled; A and B cannot

                    //----------------------------------------------------------
                    // get M(i,j) for A(i,j) .* B (i,j)
                    //----------------------------------------------------------

                    int64_t i = GBI (Mi, pM, vlen) ;
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (!mij) continue ;

                    //----------------------------------------------------------
                    // get A(i,j)
                    //----------------------------------------------------------

                    bool afound ;
                    if (adense)
                    {
                        // A(:,j) is dense, bitmap, or full; use quick lookup
                        pA = pA_start + i - iA_first ;
                        afound = GBB (Ab, pA) ;
                    }
                    else
                    {
                        // A(:,j) is sparse; use binary search for A(i,j)
                        int64_t apright = pA_end - 1 ;
                        GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
                    }
                    if (!afound) continue ;
                    ASSERT (GBI (Ai, pA, vlen) == i) ;

                    //----------------------------------------------------------
                    // get B(i,j)
                    //----------------------------------------------------------

                    bool bfound ;
                    if (bdense)
                    {
                        // B(:,j) is dense; use direct lookup for B(i,j)
                        pB = pB_start + i - iB_first ;
                        bfound = GBB (Bb, pB) ;
                    }
                    else
                    {
                        // B(:,j) is sparse; use binary search for B(i,j)
                        int64_t bpright = pB_end - 1 ;
                        GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
                    }
                    if (!bfound) continue ;
                    ASSERT (GBI (Bi, pB, vlen) == i) ;

                    //----------------------------------------------------------
                    // C(i,j) = A(i,j) .* B(i,j)
                    //----------------------------------------------------------

                    // C (i,j) = A (i,j) .* B (i,j)
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz++ ;
                    #else
                    Ci [pC] = i ;
                    GB_GETA (aij, Ax, pA) ;
                    GB_GETB (bij, Bx, pB) ;
                    GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                    pC++ ;
                    #endif
                }

                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pC == pC_end) ;
                #endif

            }
            else
            {

                //--------------------------------------------------------------
                // M is bitmap or full, for either C<M>=A.*B or C<!M>=A.*B
                //--------------------------------------------------------------

                //      ------------------------------------------
                //      C      <M> =        A       .*      B
                //      ------------------------------------------
                //      sparse  bitmap      sparse          sparse
                //      sparse  bitmap      sparse          bitmap
                //      sparse  bitmap      sparse          full
                //      sparse  bitmap      bitmap          sparse
                //      sparse  bitmap      full            sparse

                //      ------------------------------------------
                //      C      <M> =        A       .*      B
                //      ------------------------------------------
                //      sparse  full        sparse          sparse
                //      sparse  full        sparse          bitmap
                //      sparse  full        sparse          full
                //      sparse  full        bitmap          sparse
                //      sparse  full        full            sparse

                //      ------------------------------------------
                //      C     <!M> =        A       .*      B
                //      ------------------------------------------
                //      sparse  bitmap      sparse          sparse
                //      sparse  bitmap      sparse          bitmap
                //      sparse  bitmap      sparse          full
                //      sparse  bitmap      bitmap          sparse
                //      sparse  bitmap      full            sparse

                //      ------------------------------------------
                //      C     <!M> =        A       .*      B
                //      ------------------------------------------
                //      sparse  full        sparse          sparse
                //      sparse  full        sparse          bitmap
                //      sparse  full        sparse          full
                //      sparse  full        bitmap          sparse
                //      sparse  full        full            sparse

                // GB_GET_MIJ: get M(i,j) where M is bitmap or full
                #undef  GB_GET_MIJ
                #define GB_GET_MIJ(i)                                     \
                    int64_t pM = pM_start + i ;                           \
                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \
                    if (Mask_comp) mij = !mij ;

                // A or B are sparse/hyper, or both
                ASSERT (A_is_sparse || A_is_hyper || B_is_sparse || B_is_hyper);

                int64_t pM_start = j * vlen ;

                if (A_is_bitmap)
                {

                    //----------------------------------------------------------
                    // Method10: A(:,j) bitmap; B(:,j) sparse, M bitmap/full
                    //----------------------------------------------------------

                    // TODO: B can be jumbled; then so is C
                    ASSERT (B_is_sparse || B_is_hyper) ;
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            int64_t pA = pA_start + i - iA_first ;
                            if (!Ab [pA]) continue ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }

                }
                else if (B_is_bitmap)
                {

                    //----------------------------------------------------------
                    // Method11: B(:,j) bitmap; A(:,j) sparse, M bitmap/full
                    //----------------------------------------------------------

                    // TODO: A can be jumbled; then so is C
                    ASSERT (A_is_sparse || A_is_hyper) ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            int64_t pB = pB_start + i - iB_first ;
                            if (!Bb [pB]) continue ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }

                }
                else if (adense && bdense)
                {

                    //----------------------------------------------------------
                    // Method12: A(:,j) and B(:,j) dense, M bitmap/full
                    //----------------------------------------------------------

                    // TODO: only do this if A and B are full, not just (:,j)
                    // Then no matrix will be jumbled.

                    ASSERT (ajnz == bjnz) ;
                    ASSERT (iA_first == iB_first) ;
                    ASSERT (iA_last  == iB_last ) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        int64_t i = p + iA_first ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA + p) ;     // aij = Ax [pA+p]
                            GB_GETB (bij, Bx, pB + p) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }

                }
                else if (adense)
                {

                    //----------------------------------------------------------
                    // Method13: A(:,j) dense, B(:,j) sparse, M bitmap/full
                    //----------------------------------------------------------

                    // TODO: only do this if A is full, not just A(:,j)
                    // TODO: B can be jumbled; then so is C

                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA + i - iA_first) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }

                }
                else if (bdense)
                {

                    //----------------------------------------------------------
                    // Method14: A(:,j) sparse, B(:,j) dense, M bitmap/full
                    //----------------------------------------------------------

                    // TODO: only do this if B is full, not just B(:,j)
                    // TODO: A can be jumbled; then so is C

                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // C (i,j) = A (i,j) .* B (i,j)
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB + i - iB_first) ;
                            GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                            pC++ ;
                            #endif
                        }
                    }

                }
                else if (ajnz > 32 * bjnz)
                {

                    //----------------------------------------------------------
                    // Method15: A(:,j) much denser than B(:,j), M bitmap/full
                    //----------------------------------------------------------

                    // A and B cannot be jumbled
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // find i in A(:,j)
                            int64_t pright = pA_end - 1 ;
                            bool found ;
                            GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
                            if (found)
                            {
                                // C (i,j) = A (i,j) .* B (i,j)
                                #if defined ( GB_PHASE_1_OF_2 )
                                cjnz++ ;
                                #else
                                ASSERT (pC < pC_end) ;
                                Ci [pC] = i ;
                                GB_GETA (aij, Ax, pA) ;
                                GB_GETB (bij, Bx, pB) ;
                                GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                                pC++ ;
                                #endif
                            }
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else if (bjnz > 32 * ajnz)
                {

                    //----------------------------------------------------------
                    // Method16: B(:,j) much denser than A(:,j), M bitmap/full
                    //----------------------------------------------------------

                    // A and B cannot be jumbled
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        GB_GET_MIJ (i) ;
                        if (mij)
                        {
                            // find i in B(:,j)
                            int64_t pright = pB_end - 1 ;
                            bool found ;
                            GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
                            if (found)
                            {
                                // C (i,j) = A (i,j) .* B (i,j)
                                #if defined ( GB_PHASE_1_OF_2 )
                                cjnz++ ;
                                #else
                                ASSERT (pC < pC_end) ;
                                Ci [pC] = i ;
                                GB_GETA (aij, Ax, pA) ;
                                GB_GETB (bij, Bx, pB) ;
                                GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
                                pC++ ;
                                #endif
                            }
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // Method17: A(:,j) and B(:,j) about the same, M bitmap/full
                    //----------------------------------------------------------

                    // linear-time scan of A(:,j) and B(:,j)
                    // A and B cannot be jumbled

                    while (pA < pA_end && pB < pB_end)
                    {
                        int64_t iA = Ai [pA] ;
                        int64_t iB = Bi [pB] ;
                        if (iA < iB)
                        {
                            // A(i,j) exists but not B(i,j)
                            pA++ ;
                        }
                        else if (iB < iA)
                        {
                            // B(i,j) exists but not A(i,j)
                            pB++ ;
                        }
                        else
                        {
                            // both A(i,j) and B(i,j) exist
                            int64_t i = iA ;
                            GB_GET_MIJ (i) ;
                            if (mij)
                            {
                                // C (i,j) = A (i,j) .* B (i,j)
                                #if defined ( GB_PHASE_1_OF_2 )
                                cjnz++ ;
                                #else
                                ASSERT (pC < pC_end) ;
                                Ci [pC] = i ;
                                GB_GETA (aij, Ax, pA) ;
                                GB_GETB (bij, Bx, pB) ;
                                GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
                                pC++ ;
                                #endif
                            }
                            pA++ ;
                            pB++ ;
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                }
            }

            //------------------------------------------------------------------
            // final count of nnz (C (:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                TaskList [taskid].pC = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
            #endif
        }
    }
}
/* =============================== cgeqrs.c ================================ */
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeqrs.c, normal z -> c, Fri Sep 28 17:38:05 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_geqrs
 *
 *  Computes a minimum-norm solution min || A*X - B || using the
 *  QR factorization A = Q*R computed by plasma_cgeqrf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. m >= n >= 0.
 *
 * @param[in] nrhs
 *          The number of columns of B. nrhs >= 0.
 *
 * @param[in] pA
 *          Details of the QR factorization of the original matrix A as returned
 *          by plasma_cgeqrf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= m.
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_cgeqrf.
 *
 * @param[in,out] pB
 *          On entry, pointer to the m-by-nrhs right hand side matrix B.
 *          On exit, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,m),
 *          since B holds m rows on entry.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cgeqrs
 * @sa plasma_cgeqrs
 * @sa plasma_dgeqrs
 * @sa plasma_sgeqrs
 * @sa plasma_cgeqrf
 * @sa plasma_cgels
 *
 ******************************************************************************/
int plasma_cgeqrs(int m, int n, int nrhs,
                  plasma_complex32_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || n > m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    // B holds the m-by-nrhs right-hand side on entry, so ldb >= max(1,m).
    // (was imax(1, imax(1, m)) — the doubled imax was redundant)
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (m == 0 || n == 0 || nrhs == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, nrhs, 0, 0, m, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Free the tile descriptors created above (the original leaked them
        // on this path; every other error path destroys them).
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgeqrs(A, T, B, work, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_geqrs
 *
 *  Computes a minimum-norm solution using the tile QR factorization.
 *  Non-blocking tile version of plasma_cgeqrs().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_cgeqrf.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *          On entry, right-hand side matrix B in the tile layout.
 *          On exit, solution matrix X in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cgeqrs
 * @sa plasma_omp_cgeqrs
 * @sa plasma_omp_dgeqrs
 * @sa plasma_omp_sgeqrs
 * @sa plasma_omp_cgeqrf
 * @sa plasma_omp_cgels
 *
 ******************************************************************************/
void plasma_omp_cgeqrs(plasma_desc_t A, plasma_desc_t T, plasma_desc_t B,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request before using them to report descriptor
    // errors (the original validated the descriptors first and passed the
    // still-unchecked pointers to plasma_request_fail).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.n == 0)
        return;

    // Find Y = Q^H * B.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pcunmqr_tree(PlasmaLeft, Plasma_ConjTrans,
                            A, T, B, work, sequence, request);
    }
    else {
        plasma_pcunmqr(PlasmaLeft, Plasma_ConjTrans,
                       A, T, B, work, sequence, request);
    }

    // Solve R * X = Y.
    plasma_pctrsm(PlasmaLeft, PlasmaUpper,
                  PlasmaNoTrans, PlasmaNonUnit,
                  1.0, plasma_desc_view(A, 0, 0, A.n, A.n),
                       plasma_desc_view(B, 0, 0, A.n, B.n),
                  sequence, request);
}
ZQ_FaceIDPrecisionEvaluation.h
#ifndef _ZQ_FACEID_PRECISION_EVALUATION_H_ #define _ZQ_FACEID_PRECISION_EVALUATION_H_ #pragma once #include "ZQ_FaceRecognizer.h" #include "ZQ_FaceFeature.h" #include "ZQ_MathBase.h" #include "ZQ_MergeSort.h" #include <opencv2\opencv.hpp> #include <vector> #include <stdlib.h> #include <string> #include <omp.h> namespace ZQ { class ZQ_FaceIDPrecisionEvaluation { class EvaluationPair { public: std::string fileL; std::string nameL; int idL; std::string fileR; std::string nameR; int idR; int flag; //-1 or 1 ZQ_FaceFeature featL; ZQ_FaceFeature featR; bool valid; }; class EvaluationSingle { public: std::string name; int id; ZQ_FaceFeature feat; EvaluationSingle& operator = (const EvaluationSingle& v2) { name = v2.name; id = v2.id; feat.CopyData(v2.feat); return *this; } bool operator < (const EvaluationSingle& v2) const { #if defined(_WIN32) int cmp_v = _strcmpi(name.c_str(), v2.name.c_str()); #else int cmp_v = strcmp(name.c_str(), v2.name.c_str()); #endif if (cmp_v < 0) return true; else if (cmp_v > 0) return false; else { return id < v2.id; } } bool operator > (const EvaluationSingle& v2) const { #if defined(_WIN32) int cmp_v = _strcmpi(name.c_str(), v2.name.c_str()); #else int cmp_v = strcmp(name.c_str(), v2.name.c_str()); #endif if (cmp_v > 0) return true; else if (cmp_v < 0) return false; else { return id > v2.id; } } bool operator == ( const EvaluationSingle& v2) const { #if defined(_WIN32) int cmp_v = _strcmpi(name.c_str(), v2.name.c_str()); #else int cmp_v = strcmp(name.c_str(), v2.name.c_str()); #endif return cmp_v == 0 && id == v2.id; } bool SameName(const EvaluationSingle& v2) const { #if defined(_WIN32) int cmp_v = _strcmpi(name.c_str(), v2.name.c_str()); #else int cmp_v = strcmp(name.c_str(), v2.name.c_str()); #endif return cmp_v == 0; } }; public: static bool EvaluationOnLFW(std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& list_file, const std::string& folder, bool use_flip) { int recognizer_num = recognizers.size(); if (recognizer_num == 
0) return false; int real_num_threads = __max(1, __min(recognizer_num, omp_get_num_procs() - 1)); int feat_dim = recognizers[0]->GetFeatDim(); int real_dim = use_flip ? (feat_dim * 2) : feat_dim; printf("feat_dim = %d, real_dim = %d\n", feat_dim, real_dim); std::vector<std::vector<EvaluationPair> > pairs; if (!_parse_lfw_list(list_file, folder, pairs)) { printf("failed to parse list file %s\n", list_file.c_str()); return EXIT_FAILURE; } printf("parse list file %s done!\n", list_file.c_str()); int part_num = pairs.size(); std::vector<std::pair<int, int> > pair_list; for (int i = 0; i < part_num; i++) { for (int j = 0; j < pairs[i].size(); j++) { pair_list.push_back(std::make_pair(i, j)); } } double t1 = omp_get_wtime(); if (real_num_threads == 1) { int handled_num = 0; for (int nn = 0; nn < pair_list.size(); nn++) { handled_num++; if (handled_num % 100 == 0) printf("%d handled\n", handled_num); int i = pair_list[nn].first; int j = pair_list[nn].second; pairs[i][j].featL.ChangeSize(real_dim); pairs[i][j].featR.ChangeSize(real_dim); cv::Mat imgL = cv::imread(pairs[i][j].fileL); if (imgL.empty()) { printf("failed to load image %s\n", pairs[i][j].fileL.c_str()); pairs[i][j].valid = false; continue; } cv::Mat imgR = cv::imread(pairs[i][j].fileR); if (imgR.empty()) { printf("failed to load image %s\n", pairs[i][j].fileR.c_str()); pairs[i][j].valid = false; continue; } if (!recognizers[0]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true)) { printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str()); pairs[i][j].valid = false; continue; } if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true)) { printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str()); pairs[i][j].valid = false; continue; } if (use_flip) { cv::flip(imgL, imgL, 1); cv::flip(imgR, imgR, 1); if (!recognizers[0]->ExtractFeature(imgL.data, 
imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData+feat_dim, true)) { printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str()); pairs[i][j].valid = false; continue; } if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData+feat_dim, true)) { printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str()); pairs[i][j].valid = false; continue; } } pairs[i][j].valid = true; } } else { int handled_num = 0; #pragma omp parallel for schedule(dynamic, 10) num_threads(real_num_threads) for (int nn = 0; nn < pair_list.size(); nn++) { #pragma omp critical { handled_num++; if (handled_num % 100 == 0) { printf("%d handled\n", handled_num); } } int thread_id = omp_get_thread_num(); int i = pair_list[nn].first; int j = pair_list[nn].second; pairs[i][j].featL.ChangeSize(real_dim); pairs[i][j].featR.ChangeSize(real_dim); cv::Mat imgL = cv::imread(pairs[i][j].fileL); if (imgL.empty()) { #pragma omp critical { printf("failed to load image %s\n", pairs[i][j].fileL.c_str()); } pairs[i][j].valid = false; continue; } cv::Mat imgR = cv::imread(pairs[i][j].fileR); if (imgR.empty()) { #pragma omp critical { printf("failed to load image %s\n", pairs[i][j].fileR.c_str()); } pairs[i][j].valid = false; continue; } if (!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true)) { #pragma omp critical { printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str()); } pairs[i][j].valid = false; continue; } if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true)) { #pragma omp critical { printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str()); } pairs[i][j].valid = false; continue; } if (use_flip) { cv::flip(imgL, imgL, 1); cv::flip(imgR, imgR, 1); if 
(!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData + feat_dim, true)) { #pragma omp critical { printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str()); } pairs[i][j].valid = false; continue; } if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData + feat_dim, true)) { #pragma omp critical { printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str()); } pairs[i][j].valid = false; continue; } } pairs[i][j].valid = true; } } printf("extract feature done!"); double t2 = omp_get_wtime(); printf("extract features cost: %.3f secs\n", t2 - t1); int erased_num = 0; for (int i = 0; i < part_num; i++) { for (int j = pairs[i].size() - 1; j >= 0; j--) { if (!pairs[i][j].valid) { pairs[i].erase(pairs[i].begin() + j); erased_num++; } else { ZQ_MathBase::Normalize(real_dim, pairs[i][j].featL.pData); ZQ_MathBase::Normalize(real_dim, pairs[i][j].featR.pData); } } } printf("%d pairs haved been erased\n", erased_num); std::vector<EvaluationSingle> singles; for (int i = 0; i < part_num; i++) { for (int j = 0; j < pairs[i].size(); j++) { EvaluationSingle cur_single; cur_single.name = pairs[i][j].nameL; cur_single.id = pairs[i][j].idL; cur_single.feat.CopyData(pairs[i][j].featL); singles.push_back(cur_single); cur_single.name = pairs[i][j].nameR; cur_single.id = pairs[i][j].idR; cur_single.feat.CopyData(pairs[i][j].featR); singles.push_back(cur_single); } } float ACC = _compute_accuracy(pairs); _compute_far_tar(singles, real_num_threads); return true; } private: static float _compute_accuracy(const std::vector<std::vector<EvaluationPair> >& pairs) { int part_num = pairs.size(); std::vector<float> ACCs(part_num); float ACC = 0; for (int i = 0; i < part_num; i++) { std::vector<EvaluationPair> val_pairs; for (int j = 0; j < part_num; j++) { if (j != i) val_pairs.insert(val_pairs.end(), pairs[j].begin(), 
pairs[j].end()); } ZQ_FaceFeature mu; _compute_mu(val_pairs, mu); std::vector<double> val_scores, test_scores; _compute_scores(val_pairs, mu, val_scores); _compute_scores(pairs[i], mu, test_scores); double threshold = _get_threshold(val_pairs, val_scores, 10000); ACCs[i] = _get_accuracy(pairs[i], test_scores, threshold); ACC += ACCs[i]; printf("%d\t%2.2f%% (threshold = %f)\n", i, ACCs[i] * 100, threshold); /*const static int BUF_LEN = 50; char file[BUF_LEN]; sprintf_s(file, BUF_LEN, "%d_mu.txt", i); FILE* out = 0; fopen_s(&out, file, "w"); for (int k = 0; k < mu.length; k++) fprintf(out, "%12.6f\n", mu.pData[k]); fclose(out); sprintf_s(file, BUF_LEN, "%d_validscores.txt", i); fopen_s(&out, file, "w"); for (int k = 0; k < val_scores.size(); k++) fprintf(out, "%12.6f\n", val_scores[k]); fclose(out); sprintf_s(file, BUF_LEN, "%d_testscores.txt", i); fopen_s(&out, file, "w"); for (int k = 0; k < test_scores.size(); k++) fprintf(out, "%12.6f\n", test_scores[k]); fclose(out);*/ } printf("----------------\n"); printf("AVE\t%2.2f%%\n", ACC / part_num * 100); return ACC; } static bool _parse_lfw_list(const std::string& list_file, const std::string& folder, std::vector<std::vector<EvaluationPair> >& pairs) { FILE* in = 0; #if defined(_WIN32) if(0 != fopen_s(&in, list_file.c_str(), "r")) return false; #else in = fopen(list_file.c_str(), "r"); if (in == NULL) return false; #endif int part_num, half_pair_num; const static int BUF_LEN = 200; char line[BUF_LEN]; fgets(line, BUF_LEN, in); sscanf_s(line, "%d%d", &part_num, &half_pair_num); pairs.resize(part_num); std::vector<std::string> strings; for (int i = 0; i < part_num; i++) { for (int j = 0; j < 2 * half_pair_num; j++) { fgets(line, 199, in); int len = strlen(line); if (line[len - 1] == '\n') line[--len] = '\0'; std::string input = line; _split_string(input, std::string("\t"), strings); if (strings.size() == 3) { EvaluationPair cur_pair; cur_pair.nameL = strings[0]; cur_pair.nameR = strings[0]; cur_pair.idL = 
atoi(strings[1].c_str()); cur_pair.idR = atoi(strings[2].c_str()); char num2str[BUF_LEN]; #if defined(_WIN32) sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str())); cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str); sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[2].c_str())); cur_pair.fileR = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str); #else sprintf(num2str, "_%04i.jpg", atoi(strings[1].c_str())); cur_pair.fileL = folder + "/" + strings[0] + "/" + strings[0] + std::string(num2str); sprintf(num2str, "_%04i.jpg", atoi(strings[2].c_str())); cur_pair.fileR = folder + "/" + strings[0] + "/" + strings[0] + std::string(num2str); #endif cur_pair.flag = 1; pairs[i].push_back(cur_pair); } else if (strings.size() == 4) { EvaluationPair cur_pair; cur_pair.nameL = strings[0]; cur_pair.nameR = strings[2]; cur_pair.idL = atoi(strings[1].c_str()); cur_pair.idR = atoi(strings[3].c_str()); char num2str[BUF_LEN]; #if defined(_WIN32) sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str())); cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str); sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[3].c_str())); cur_pair.fileR = folder + "\\" + strings[2] + "\\" + strings[2] + std::string(num2str); #else sprintf(num2str, "_%04i.jpg", atoi(strings[1].c_str())); cur_pair.fileL = folder + "/" + strings[0] + "/" + strings[0] + std::string(num2str); sprintf(num2str, "_%04i.jpg", atoi(strings[3].c_str())); cur_pair.fileR = folder + "/" + strings[2] + "/" + strings[2] + std::string(num2str); #endif cur_pair.flag = -1; pairs[i].push_back(cur_pair); } } } fclose(in); return true; } static bool _compute_mu(const std::vector<EvaluationPair>& val_pairs, ZQ_FaceFeature& mu) { if (val_pairs.size() == 0) return false; int feat_dim = val_pairs[0].featL.length; mu.ChangeSize(feat_dim); std::vector<double> sum(feat_dim); for (int dd = 0; dd < feat_dim; dd++) sum[dd] = 0; for 
(int i = 0; i < val_pairs.size(); i++) { for (int dd = 0; dd < feat_dim; dd++) { sum[dd] += val_pairs[i].featL.pData[dd]; sum[dd] += val_pairs[i].featR.pData[dd]; } } for (int dd = 0; dd < feat_dim; dd++) { mu.pData[dd] = sum[dd] / (2 * val_pairs.size()); } return true; } static bool _compute_scores(const std::vector<EvaluationPair>& pairs, const ZQ_FaceFeature& mu, std::vector<double>& scores) { int num = pairs.size(); if (num == 0) return false; scores.resize(num); int feat_dim = mu.length; std::vector<double> featL(feat_dim), featR(feat_dim); for (int i = 0; i < num; i++) { for (int j = 0; j < feat_dim; j++) { featL[j] = pairs[i].featL.pData[j] - mu.pData[j]; featR[j] = pairs[i].featR.pData[j] - mu.pData[j]; } double lenL = 0, lenR = 0; for (int j = 0; j < feat_dim; j++) { lenL += featL[j] * featL[j]; lenR += featR[j] * featR[j]; } lenL = sqrt(lenL); lenR = sqrt(lenR); if (lenL != 0) { for (int j = 0; j < feat_dim; j++) featL[j] /= lenL; } if (lenR != 0) { for (int j = 0; j < feat_dim; j++) featR[j] /= lenR; } double sco = 0; for (int j = 0; j < feat_dim; j++) sco += featL[j] * featR[j]; scores[i] = sco; } return true; } static float _get_threshold(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, int thrNum) { std::vector<double> accurarys(2 * thrNum + 1); for (int i = 0; i < 2 * thrNum + 1; i++) { double threshold = (double)i / thrNum - 1; accurarys[i] = _get_accuracy(pairs, scores, threshold); } double max_acc = accurarys[0]; for (int j = 1; j < 2 * thrNum + 1; j++) max_acc = __max(max_acc, accurarys[j]); double sum_threshold = 0; int sum_num = 0; for (int i = 0; i < 2 * thrNum + 1; i++) { if (max_acc == accurarys[i]) { sum_threshold += (double)i / thrNum - 1; sum_num++; } } return sum_threshold / sum_num; } static float _get_accuracy(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, double threshold) { if (pairs.size() == 0 || pairs.size() != scores.size()) return 0; double sum = 0; for (int i = 0; i < 
pairs.size(); i++) { if (pairs[i].flag > 0 && scores[i] > threshold || pairs[i].flag < 0 && scores[i] < threshold) sum++; } return sum / pairs.size(); } static void _split_string(const std::string& s, const std::string& delim, std::vector< std::string >& ret) { size_t last = 0; size_t index = s.find_first_of(delim, last); ret.clear(); while (index != std::string::npos) { ret.push_back(s.substr(last, index - last)); last = index + 1; index = s.find_first_of(delim, last); } if (index - last>0) { ret.push_back(s.substr(last, index - last)); } } static void _compute_far_tar(std::vector<EvaluationSingle>& singles, int real_num_threads) { printf("compute far tar begin\n"); ZQ_MergeSort::MergeSort(&singles[0], singles.size(), true); int removed_num = 0; for (int i = singles.size() - 2; i >= 0; i--) { if (singles[i] == singles[i + 1]) { singles.erase(singles.begin() + i + 1); removed_num++; } } int image_num = singles.size(); printf("%d removed, remain %d\n", removed_num, image_num); int all_num = image_num*(image_num - 1)/2; std::vector<float> all_scores(all_num); std::vector<int> all_idx_i(all_num), all_idx_j(all_num); std::vector<bool> all_flag(all_num); std::vector<int> sort_indices(all_num); int idx = 0; int same_num = 0; for (int i = 0; i < image_num; i++) { for (int j = i + 1; j < image_num; j++) { all_idx_i[idx] = i; all_idx_j[idx] = j; bool is_same = singles[i].SameName(singles[j]); all_flag[idx] = is_same; if (is_same) same_num++; sort_indices[idx] = idx; idx++; } } int notsame_num = all_num - same_num; printf("all_num = %d, same_num = %d, notsame_num = %d\n", all_num, same_num, notsame_num); double t1 = omp_get_wtime(); int dim = singles[0].feat.length; if (real_num_threads == 1) { for (int n = 0; n < all_num; n++) { int i = all_idx_i[n]; int j = all_idx_j[n]; all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData); } } else { int chunk_size = (all_num + real_num_threads - 1) / real_num_threads; #pragma omp parallel for 
schedule(static, chunk_size) num_threads(real_num_threads) for (int n = 0; n < all_num; n++) { int i = all_idx_i[n]; int j = all_idx_j[n]; all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData); } } double t2 = omp_get_wtime(); printf("compute all scores cost: %.3f secs\n", t2 - t1); ZQ_MergeSort::MergeSort(&all_scores[0], &sort_indices[0], all_num, false); double t3 = omp_get_wtime(); printf("sort all scores cost: %.3f secs\n", t3 - t2); const int stage_num = 4; double far_num[stage_num] = { 1e-6 * notsame_num, 1e-5 * notsame_num, 1e-4 * notsame_num, 1e-3 * notsame_num }; int cur_far_num = 0; int cur_tar_num = 0; int cur_stage = 0; for (int i = 0; i < all_num; i++) { if (cur_stage >= stage_num) break; int sort_id = sort_indices[i]; if (all_flag[sort_id]) { cur_tar_num++; } else { cur_far_num++; } if (cur_far_num > far_num[cur_stage]) { printf("thresh = %.5f far = %15e, tar = %15f\n", all_scores[i], (double)cur_far_num / notsame_num, (double)cur_tar_num / same_num); cur_stage++; } } } }; } #endif
GB_unop__log1p_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): auto-generated precision/operator instantiation — any real
// change belongs in the code generator, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__log1p_fc64_fc64
// op(A') function: GB_unop_tran__log1p_fc64_fc64

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog1p (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog1p (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_clog1p (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG1P || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies complex log1p entrywise over anz entries, parallelized with OpenMP.
GrB_Info GB_unop_apply__log1p_fc64_fc64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast reduces to a parallel memcpy
        // (never taken for log1p: the macro above is 0)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog1p (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog1p (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose template expands here using the GB_* macros above.
GrB_Info GB_unop_tran__log1p_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nestedSIMD.c
#include <stdio.h> #define N 3 #define M 4 int main() { int a[N][M]; int i,ii, error = 0; // initialize for(i=0; i<N; i++) for(ii=0; ii<M; ii++) a[i][ii] = -1; // offload #pragma omp target map(tofrom: a[0:3][0:4]) { int k,j; #pragma omp simd for(k=0; k<N; k++) { a[k][0] = k; #pragma omp simd for(j=0; j<M; j++) { a[k][j] = j; } } } // check for(i=0; i<N; i++) { for(ii=0; ii<M; ii++) { if (a[i][ii] != ii) { ++error; } } } // report printf("Done with %d errors\n", error); return error; }
threads.c
/* * Copyright (c) 2003, 2006 Matteo Frigo * Copyright (c) 2003, 2006 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* threads.c: Portable thread spawning for loops, via the X(spawn_loop) function. The first portion of this file is a set of macros to spawn and join threads on various systems. */ #include "threads.h" /************************* Thread Glue *************************/ /* Adding support for a new shared memory thread API should be easy. You simply do the following things (look at the POSIX and Solaris threads code for examples): * Invent a symbol of the form USING_FOO_THREADS to denote the use of your thread API, and add an #elif defined(USING_FOO_THREADS) before the #else clause below. This is where you will put your thread definitions. In this #elif, insert the following: -- #include any header files needed to use the thread API. -- Typedef fftw_thr_function to be a function pointer of the type used as a argument to fftw_thr_spawn (i.e. the entry function for a thread). -- Define fftw_thr_id, via a typedef, to be the type that is used for thread identifiers. -- #define fftw_thr_spawn(tid_ptr, proc, data) to call whatever function to spawn a new thread. 
The new thread should call proc(data) as its starting point, and tid_ptr is a pointer to a fftw_thr_id that is set to an identifier for the thread. You can also define this as a subroutine (put it in threads.c) if it is too complicated for a macro. The prototype should be: void fftw_thr_spawn(fftw_thr_id *tid_ptr, fftw_thr_function proc, void *data); -- #define fftw_thr_wait(tid) to block until the thread whose identifier is tid has terminated. You can also define this as a subroutine (put it in threads.c) if it is too complicated for a macro. The prototype should be: void fftw_thr_wait(fftw_thr_id tid); * If semaphores are supported (which allows FFTW to pre-spawn the threads), then you should #define HAVE_SEMAPHORES and: -- typedef fftw_sem_id to the type for a semaphore id -- #define fftw_sem_init(&id) to initialize the semaphore id to zero (or equivalent) -- #define fftw_sem_destroy(&id) to destroy the id -- #define fftw_sem_wait(&id) to the equivalent of the SYSV sem_wait -- #define fftw_sem_post(&id) the equivalent of SYSV sem_post THIS IS CURRENTLY EXPERIMENTAL ONLY. * If you need to perform any initialization before using threads, put your initialization code in the X(ithreads_init)() function in threads.c, bracketed by the appropriate #ifdef of course. * Also, of course, you should modify config.h to #define USING_FOO_THREADS, or better yet modify and configure.ac so that autoconf can automatically detect your threads library. * Finally, if you do implement support for a new threads API, be sure to let us know at fftw@fftw.org so that we can distribute your code to others! 
*/ /************************** MP directive Threads ****************************/ #if defined(USING_OPENMP_THREADS) || defined(USING_SGIMP_THREADS) /* Use MP compiler directives to induce parallelism, in which case we don't need any of the thread spawning/waiting macros: */ typedef void * (*fftw_thr_function) (void *); typedef char fftw_thr_id; /* dummy */ #define fftw_thr_spawn(tid_ptr, proc, data) ((proc)(data)) #define fftw_thr_wait(tid) (0) /* do nothing */ #define USING_COMPILER_THREADS 1 /************************** Solaris Threads ****************************/ #elif defined(USING_SOLARIS_THREADS) /* Solaris threads glue. Tested. */ /* link with -lthread */ #include <thread.h> /* Thread entry point: */ typedef void * (*fftw_thr_function) (void *); typedef thread_t fftw_thr_id; #define fftw_thr_spawn(tid_ptr, proc, data) \ thr_create(0,0,proc,data,THR_BOUND,tid_ptr) #define fftw_thr_wait(tid) thr_join(tid,0,0) /************************** BeOS Threads ****************************/ #elif defined(USING_BEOS_THREADS) /* BeOS threads glue. Tested for DR8.2. */ #include <OS.h> /* Thread entry point: */ typedef thread_entry fftw_thr_function; typedef thread_id fftw_thr_id; #define fftw_thr_spawn(tid_ptr, proc, data) { \ *(tid_ptr) = spawn_thread(proc,"FFTW",B_NORMAL_PRIORITY,data); \ resume_thread(*(tid_ptr)); \ } /* wait_for_thread requires that we pass a valid pointer as the second argument, even if we're not interested in the result. */ #define fftw_thr_wait(tid) {long exit_val;wait_for_thread(tid, &exit_val);} /************************** MacOS Threads ****************************/ #elif defined(USING_MACOS_THREADS) /* MacOS (old! old!) MP threads glue. Experimental, untested! I do not have an MP MacOS system available to me...I just read the documentation. There is actually a good chance that this will work (since the code below is so short), but I make no guarantees. Consider it to be a starting point for your own implementation. 
I also had to insert some code in threads.c. MacOS X has real SMP support, thank goodness; I'm leaving this code here mainly for historical purposes. */ /* Using this code in the MacOS: (See the README file for general documenation on the FFTW threads code.) To use this code, you have to do two things. First of all, you have to #define the symbol USING_MACOS_THREADS. This can be done at the top of this file or perhaps in your compiler options. Second, you have to weak-link your project to the MP library. In your code, you should check at run-time with MPLibraryIsLoaded() to see if the MP library is available. If it is not, it is still safe to call the fftw threads routines...in this case, however, you must always pass 1 for the nthreads parameter! (Otherwise, you will probably want to pass the value of MPProcessors() for the nthreads parameter.) */ #include <MP.h> typedef TaskProc fftw_thr_function; typedef MPQueueID fftw_thr_id; #define fftw_thr_spawn(tid_ptr, proc, data) { \ MPTaskID task; \ MPCreateQueue(tid_ptr); \ MPCreateTask(proc,data,kMPUseDefaultStackSize,*(tid_ptr),0,0, \ kMPNormalTaskOptions,&task); \ } #define fftw_thr_wait(tid) { \ void *param1,*param2,*param3; \ MPWaitOnQueue(tid,&param1,&param2,&param3,kDurationForever); \ MPDeleteQueue(tid); \ } /************************** Win32 Threads ****************************/ #elif defined(__WIN32__) || defined(_WIN32) || defined(_WINDOWS) /* Win32 threads glue. We have not tested this code! (I just implemented it by looking at a Win32 threads manual.) Users have reported that this code works under NT using Microsoft compilers. This code should be automatically used on Windows, assuming that one of the above macros is defined by your compiler. You must also link to the thread-safe version of the C runtime library. 
*/ #include <windows.h> #include <process.h> typedef LPTHREAD_START_ROUTINE fftw_thr_function; typedef HANDLE fftw_thr_id; /* The following macros are based on a recommendation in the July 1999 Microsoft Systems Journal (online), to substitute a call to _beginthreadex for CreateThread. The former is needed in order to make the C runtime library thread-safe (in particular, our threads may call malloc/free). */ typedef unsigned (__stdcall *PTHREAD_START) (void *); #define chBEGINTHREADEX(psa, cbStack, pfnStartAddr, \ pvParam, fdwCreate, pdwThreadID) \ ((HANDLE) _beginthreadex( \ (void *) (psa), \ (unsigned) (cbStack), \ (PTHREAD_START) (pfnStartAddr), \ (void *) (pvParam), \ (unsigned) (fdwCreate), \ (unsigned *) (pdwThreadID))) #define fftw_thr_spawn(tid_ptr, proc, data) { \ DWORD thrid; \ *(tid_ptr) = chBEGINTHREADEX((LPSECURITY_ATTRIBUTES) NULL, 0, \ (fftw_thr_function) proc, (LPVOID) data, \ 0, &thrid); \ } #define fftw_thr_wait(tid) { \ WaitForSingleObject(tid, INFINITE); \ CloseHandle(tid); \ } /************************** Mach cthreads ****************************/ #elif defined(USING_MACH_THREADS) #ifdef HAVE_MACH_CTHREADS_H #include <mach/cthreads.h> #elif defined(HAVE_CTHREADS_H) #include <cthreads.h> #elif defined(HAVE_CTHREAD_H) #include <cthread.h> #endif typedef cthread_fn_t fftw_thr_function; typedef cthread_t fftw_thr_id; #define fftw_thr_spawn(tid_ptr, proc, data) \ *(tid_ptr) = cthread_fork(proc, (any_t) (data)) #define fftw_thr_wait(tid) cthread_join(tid) /************************** POSIX Threads ****************************/ #elif defined(USING_POSIX_THREADS) /* use the default, POSIX threads: */ /* POSIX threads glue. Tested. 
*/

/* link with -lpthread, or better yet use ACX_PTHREAD in autoconf */
#include <pthread.h>

/* Thread entry point: */
typedef void * (*fftw_thr_function) (void *);

/* Creation attributes for spawned threads; stays NULL (pthreads
   defaults) unless X(ithreads_init) has to change them. */
static pthread_attr_t fftw_pthread_attributes; /* attrs for POSIX threads */
static pthread_attr_t *fftw_pthread_attributes_p = 0;

typedef pthread_t fftw_thr_id;

#define fftw_thr_spawn(tid_ptr, proc, data) \
     CK(!pthread_create(tid_ptr,fftw_pthread_attributes_p,proc,data))

#define fftw_thr_wait(tid) CK(!pthread_join(tid,0))

/* POSIX semaphores are disabled for now because, at least on my Linux
   machine, they don't seem to offer much performance advantage. */
#if 0
#define HAVE_SEMAPHORES 1
#include <semaphore.h>
typedef sem_t fftw_sem_id;
#define fftw_sem_init(pid) CK(!sem_init(pid, 0, 0))
#define fftw_sem_destroy(pid) CK(!sem_destroy(pid))
#define fftw_sem_wait(pid) CK(!sem_wait(pid))
#define fftw_sem_post(pid) CK(!sem_post(pid))
#endif /* 0 */

#elif defined(HAVE_THREADS)
# error HAVE_THREADS is defined without any USING_*_THREADS
#endif

#if 0 /* 1 for experimental pre-spawned threads via Linux spinlocks */
#ifndef HAVE_SEMAPHORES
#define HAVE_SEMAPHORES 1

/* from x86 linux/kernel.h */
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#include <asm/spinlock.h>
typedef spinlock_t fftw_sem_id;
/* initialize in the "locked" state so the first wait blocks */
#define fftw_sem_init(pid) { spin_lock_init(pid); spin_lock(pid); }
#define fftw_sem_destroy(pid) (void) (pid)
#define fftw_sem_wait(pid) { spin_unlock_wait(pid); spin_lock(pid); }
#define fftw_sem_post(pid) spin_unlock(pid)
#endif /* !HAVE_SEMAPHORES */
#endif /* 0 */

/***********************************************************************/

#ifdef HAVE_THREADS

#ifdef HAVE_SEMAPHORES

/* One pre-spawned worker.  Workers form a singly-linked list headed by
   `workers`; each waits on sid_ready, runs proc(&d), posts sid_done. */
typedef struct worker_data_s {
     fftw_thr_id tid;
     fftw_sem_id sid_ready;
     fftw_sem_id sid_done;
     spawn_function proc;
     spawn_data d;
     struct worker_data_s *next;
} worker_data;

/* Worker main loop: a null proc is the shutdown signal
   (posted by kill_workforce below). */
static void *do_work(worker_data *w)
{
     while (1) {
          fftw_sem_wait(&w->sid_ready);
          if (!w->proc)
               break;
          w->proc(&w->d);
          fftw_sem_post(&w->sid_done);
     }
     return 0;
}

/* Head of the global pre-spawned worker list. */
worker_data *workers = (worker_data *) 0;

/* make sure at least nworkers exist */
static void minimum_workforce(int nworkers)
{
     worker_data *w = workers;

     /* count existing workers... */
     while (w) {
          --nworkers;
          w = w->next;
     }
     /* ...and spawn any that are still missing */
     while (nworkers-- > 0) {
          w = (worker_data *) MALLOC(sizeof(worker_data), OTHER);
          w->next = workers;
          fftw_sem_init(&w->sid_ready);
          fftw_sem_init(&w->sid_done);
          fftw_thr_spawn(&w->tid, (fftw_thr_function) do_work, (void *) w);
          workers = w;
     }
}

/* Shut down, join, and free every pre-spawned worker. */
static void kill_workforce(void)
{
     while (workers) {
          worker_data *w = workers;
          workers = w->next;
          w->proc = (spawn_function) 0; /* null proc = exit request */
          fftw_sem_post(&w->sid_ready);
          fftw_thr_wait(w->tid);
          fftw_sem_destroy(&w->sid_ready);
          fftw_sem_destroy(&w->sid_done);
          X(ifree)(w);
     }
}

#endif /* HAVE_SEMAPHORES */

/* Distribute a loop from 0 to loopmax-1 over nthreads threads.
   proc(d) is called to execute a block of iterations from d->min
   to d->max-1.  d->thr_num indicate the number of the thread
   that is executing proc (from 0 to nthreads-1), and d->data is
   the same as the data parameter passed to X(spawn_loop).

   This function returns only after all the threads have completed. */
void X(spawn_loop)(int loopmax, int nthr, spawn_function proc, void *data)
{
     int block_size;

     A(loopmax >= 0);
     A(nthr > 0);
     A(proc);

     if (!loopmax) return;

     /* Choose the block size and number of threads in order to (1)
        minimize the critical path and (2) use the fewest threads that
        achieve the same critical path (to minimize overhead).
        e.g. if loopmax is 5 and nthr is 4, we should use only 3
        threads with block sizes of 2, 2, and 1. */
     block_size = (loopmax + nthr - 1) / nthr;
     nthr = (loopmax + block_size - 1) / block_size;

     if (nthr <= 1) {
          /* only one thread needed: run the whole loop inline */
          spawn_data d;
          d.min = 0;
          d.max = loopmax;
          d.thr_num = 0;
          d.data = data;
          proc(&d);
     }
     else {
#if defined(USING_COMPILER_THREADS)
          spawn_data d;
#elif defined(HAVE_SEMAPHORES)
          spawn_data d;
          worker_data *w;
#else
          spawn_data *d;
          fftw_thr_id *tid;
#endif
          int i;

          THREAD_ON; /* prevent debugging mode from failing under threads */

#if defined(USING_COMPILER_THREADS)

#if defined(USING_SGIMP_THREADS)
#pragma parallel local(d,i)
          {
#pragma pfor iterate(i=0; nthr; 1)
#elif defined(USING_OPENMP_THREADS)
#pragma omp parallel for private(d)
#endif
          for (i = 0; i < nthr; ++i) {
               d.max = (d.min = i * block_size) + block_size;
               if (d.max > loopmax)
                    d.max = loopmax;
               d.thr_num = i;
               d.data = data;
               proc(&d);
          }
#if defined(USING_SGIMP_THREADS)
          }
#endif

#elif defined(HAVE_SEMAPHORES)

          /* hand the first nthr-1 blocks to pre-spawned workers... */
          --nthr;
          for (w = workers, i = 0; i < nthr; ++i) {
               A(w);
               w->d.max = (w->d.min = i * block_size) + block_size;
               w->d.thr_num = i;
               w->d.data = data;
               w->proc = proc;
               fftw_sem_post(&w->sid_ready);
               w = w->next;
          }
          /* ...run the last block on the calling thread... */
          d.min = i * block_size;
          d.max = loopmax;
          d.thr_num = i;
          d.data = data;
          proc(&d);
          /* ...then wait for all workers to signal completion */
          for (w = workers, i = 0; i < nthr; ++i) {
               A(w);
               fftw_sem_wait(&w->sid_done);
               w = w->next;
          }

#else /* explicit thread spawning: */

          STACK_MALLOC(spawn_data *, d, sizeof(spawn_data) * nthr);
          STACK_MALLOC(fftw_thr_id *, tid, sizeof(fftw_thr_id) * (--nthr));

          for (i = 0; i < nthr; ++i) {
               d[i].max = (d[i].min = i * block_size) + block_size;
               d[i].thr_num = i;
               d[i].data = data;
               fftw_thr_spawn(&tid[i], (fftw_thr_function) proc, (void *) (d + i));
          }
          /* the calling thread runs the last block itself */
          d[i].min = i * block_size;
          d[i].max = loopmax;
          d[i].thr_num = i;
          d[i].data = data;
          proc(&d[i]);

          for (i = 0; i < nthr; ++i)
               fftw_thr_wait(tid[i]);

          STACK_FREE(tid);
          STACK_FREE(d);

#endif /* ! USING_COMPILER_THREADS */

          THREAD_OFF; /* prevent debugging mode from failing under threads */
     }
}

#else /* !
HAVE_THREADS */

/* No thread support compiled in: run the whole loop inline on the
   calling thread. */
void X(spawn_loop)(int loopmax, int nthr, spawn_function proc, void *data)
{
     spawn_data d;

     UNUSED(nthr);
     d.min = 0;
     d.max = loopmax;
     d.thr_num = 0;
     d.data = data;
     proc(&d);
}
#endif

/* X(ithreads_init) does any initialization that is necessary to use
   threads.  It must be called before calling any fftw threads
   functions.

   Returns 0 if successful, and non-zero if there is an error.
   Do not call any fftw threads routines if X(ithreads_init)
   is not successful! */

int X(ithreads_init)(void)
{
#ifdef USING_POSIX_THREADS
     /* Set the thread creation attributes as necessary.  If we don't
        change anything, just use the default attributes (NULL). */
     int err, attr, attr_changed = 0;

     err = pthread_attr_init(&fftw_pthread_attributes); /* set to defaults */
     if (err) return err;

     /* Make sure that threads are joinable! (they aren't on AIX) */
     err = pthread_attr_getdetachstate(&fftw_pthread_attributes, &attr);
     if (err) return err;
     if (attr != PTHREAD_CREATE_JOINABLE) {
          err = pthread_attr_setdetachstate(&fftw_pthread_attributes,
                                            PTHREAD_CREATE_JOINABLE);
          if (err) return err;
          attr_changed = 1;
     }

     /* Make sure threads parallelize (they don't by default on
        Solaris).  Note, however that the POSIX standard does not
        *require* implementations to support PTHREAD_SCOPE_SYSTEM.
        They may only support PTHREAD_SCOPE_PROCESS (e.g. IRIX,
        Cygwin).  In this case, how the threads interact with other
        resources on the system is undefined by the standard, and we
        have to hope for the best. */
     err = pthread_attr_getscope(&fftw_pthread_attributes, &attr);
     if (err) return err;
     if (attr != PTHREAD_SCOPE_SYSTEM) {
          err = pthread_attr_setscope(&fftw_pthread_attributes,
                                      PTHREAD_SCOPE_SYSTEM);
          if (err == 0) attr_changed = 1; /* ignore errors */
     }

     if (attr_changed)  /* we aren't using the defaults */
          fftw_pthread_attributes_p = &fftw_pthread_attributes;
     else {
          fftw_pthread_attributes_p = NULL; /* use default attributes */
          err = pthread_attr_destroy(&fftw_pthread_attributes);
          if (err) return err;
     }
#endif /* USING_POSIX_THREADS */

#ifdef USING_MACOS_THREADS
     /* FIXME: don't have malloc hooks (yet) in fftw3 */
     /* Must use MPAllocate and MPFree instead of malloc and free: */
     if (MPLibraryIsLoaded()) {
          MALLOC_hook = MPAllocate;
          fftw_free_hook = MPFree;
     }
#endif /* USING_MACOS_THREADS */

#if defined(USING_OPENMP_THREADS) && ! defined(_OPENMP)
#error OpenMP enabled but not using an OpenMP compiler
#endif

#ifdef HAVE_THREADS
     /* install the threaded solver hooks */
     X(mksolver_ct_hook) = X(mksolver_ct_threads);
     X(mksolver_hc2hc_hook) = X(mksolver_hc2hc_threads);
     return 0; /* no error */
#else
     return 0; /* no threads, no error */
#endif
}

/* This function must be called before using nthreads > 1, with the
   maximum number of threads that will be used. */
void X(threads_setmax)(int nthreads_max)
{
#ifdef HAVE_SEMAPHORES
     /* the calling thread runs one block itself, so only
        nthreads_max - 1 workers are needed */
     minimum_workforce(nthreads_max - 1);
#else
     UNUSED(nthreads_max);
#endif
}

/* Undo X(ithreads_init): release attributes, shut down any workers,
   and uninstall the solver hooks. */
void X(threads_cleanup)(void)
{
#ifdef USING_POSIX_THREADS
     if (fftw_pthread_attributes_p) {
          pthread_attr_destroy(fftw_pthread_attributes_p);
          fftw_pthread_attributes_p = 0;
     }
#endif /* USING_POSIX_THREADS */

#ifdef HAVE_SEMAPHORES
     kill_workforce();
#endif

#ifdef HAVE_THREADS
     X(mksolver_ct_hook) = 0;
     X(mksolver_hc2hc_hook) = 0;
#endif
}
task_depend.c
/* --- File task_depend.c --- */
#include <stdlib.h>
#include <stdio.h>

/* Wavefront recurrence demo: x[i][j] = x[i-1][j] + x[i][j-1] is
 * computed once serially and once with OpenMP tasks, and both results
 * are printed so they can be compared.
 *
 * Returns 0 on success. */
int main(int argc, char **argv)
{
  int N = 8;
  int x[N][N];
  int i, j;

  /* Initialize x */
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      x[i][j] = i + j;

  /* Serial computation */
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      x[i][j] = x[i-1][j] + x[i][j-1];
  }

  printf("Serial result:\n");
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      printf("%8d ", x[i][j]);
    printf("\n");
  }

  /* Reset x */
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      x[i][j] = i + j;

  /* Parallel computation.
   *
   * BUGFIX: the previous revision put a bare "#pragma omp parallel" on
   * the loop nest, which made every thread execute the whole ordered
   * recurrence concurrently -- a data race, since x[i][j] reads
   * x[i-1][j] and x[i][j-1].  Generate one task per element instead
   * and let the runtime order them along the wavefront via depend
   * clauses. */
#pragma omp parallel
#pragma omp single
  {
    for (i = 1; i < N; i++) {
      for (j = 1; j < N; j++) {
#pragma omp task depend(in: x[i-1][j], x[i][j-1]) depend(out: x[i][j]) firstprivate(i, j) shared(x)
        x[i][j] = x[i-1][j] + x[i][j-1];
      }
    }
  } /* implicit barrier at end of parallel: all tasks done before printing */

  printf("Parallel result:\n");
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      printf("%8d ", x[i][j]);
    printf("\n");
  }

  return 0;
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(8*t1+Nx+7,64)),floord(16*t2+Nx+3,64)),floord(8*t3+Nx-5,64)),floord(16*t1-16*t2+Nz+Nx+5,64));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),16*t4+14);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ 
(-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
outputdep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// loop carried output dependence
// x = .. :
//
// loop carried true dependence due to:
// .. = x;
// x = ..;
//
// NOTE(review): the races below are *intentional* -- this is a
// data-race benchmark kernel (see the DataRaceBench notice above).
// Do not "fix" the loop by privatizing x.
#include <stdio.h>
int a[100];
int main()
{
  int len=100;
  int i,x=10;
  // Every iteration writes x (loop-carried output dependence) and
  // reads whatever value some iteration last wrote (true dependence),
  // so under this worksharing pragma the final x and the contents of
  // a[] are nondeterministic.
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i] = x;
    x=i;
  }
  printf("x=%d",x);
  return 0;
}
expected_output.c
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <polybench.h> #include "trmm.h" /** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /*trmm.c: this file is part of PolyBench/C*/ /*Include polybench common header.*/ /*Include benchmark-specific header.*/ /*Array initialization.*/ static void init_array(int m, int n, double *alpha, double A[1000][1000], double B[1000][1200]) { int i, j; *alpha = 1.5; for(i = 0; i < m; i++) { for(j = 0; j < i; j++) { A[i][j] = (double) ((i + j) % m) / m; } A[i][i] = 1.0; for(j = 0; j < n; j++) { B[i][j] = (double) ((n + (i - j)) % n) / n; } } } /*DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output.*/ static void print_array(int m, int n, double B[1000][1200]) { int i, j; fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "B"); for(i = 0; i < m; i++) for(j = 0; j < n; j++) { if((i * m + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2lf ", B[i][j]); } fprintf(stderr, "\nend dump: %s\n", "B"); fprintf(stderr, "==END DUMP_ARRAYS==\n"); } /*Main computational kernel. 
The whole function will be timed, including the call and return.*/
/* PolyBench trmm kernel: for each row i, accumulate A[k][i]*B[k][j]
   over k > i into B[i][j], then scale by alpha.  The outer i loop is
   serial -- iteration i reads rows B[k] for k > i that later
   iterations would overwrite -- so only the independent j iterations
   are parallelized. */
static void kernel_trmm(int m, int n, double alpha, double A[1000][1000], double B[1000][1200]) {
   int i, j, k;
   /*************** Clava msgError **************
   unsolved dependency for arrayAccess B	 use : RW
   ****************************************/
   for(i = 0; i < m; i++) {
      #pragma omp parallel for default(shared) private(j, k) firstprivate(n, i, m, alpha, A)
      for(j = 0; j < n; j++) {
         /*************** Clava msgError **************
         unsolved dependency for arrayAccess B	 use : RW
         ****************************************/
         for(k = i + 1; k < m; k++) B[i][j] += A[k][i] * B[k][j];
         B[i][j] = alpha * B[i][j];
      }
   }
}

/* Driver: allocate, initialize, run the timed kernel, and optionally
   dump B (the argc > 42 guard is normally false, so the dump is
   skipped in timing runs). */
int main(int argc, char **argv) {
   /*Retrieve problem size.*/
   int m = 1000;
   int n = 1200;
   /*Variable declaration/allocation.*/
   double alpha;
   double (*A)[1000][1000];
   A = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double));
   ;
   double (*B)[1000][1200];
   B = (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));
   ;
   /*Initialize array(s).*/
   init_array(m, n, &alpha, *A, *B);
   /*Start timer.*/
   ;
   /*Run kernel.*/
   kernel_trmm(m, n, alpha, *A, *B);
   /*Stop and print timer.*/
   ;
   ;
   /*Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument.*/
   if(argc > 42 && !strcmp(argv[0], "")) print_array(m, n, *B);
   /*Be clean.*/
   free((void *) A);
   ;
   free((void *) B);
   ;
   return 0;
}
MergeSort.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Merge the two adjacent sorted runs arr[l..m] and arr[m+1..r] in
 * place, using temporary copies of both runs. */
void merge(int arr[], int l, int m, int r)
{
    int i, j, k;
    int n1 = m - l + 1;   /* length of the left run  */
    int n2 = r - m;       /* length of the right run */

    /* temp arrays (VLAs) holding the two runs */
    int L[n1], R[n2];

    /* Copy data to the temp arrays.
     * BUGFIX: this copy was wrapped in "#pragma omp parallel" with a
     * stray '#' before the loop body ("# L[i] = ..."), which is an
     * invalid preprocessing directive and did not compile.  A plain
     * loop is correct, and the copy is too cheap to parallelize. */
    for (i = 0; i < n1; i++)
        L[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        R[j] = arr[m + 1 + j];

    /* Merge the temp arrays back into arr[l..r] */
    i = 0;   /* index into L */
    j = 0;   /* index into R */
    k = l;   /* index into arr */
    while (i < n1 && j < n2) {
        if (L[i] <= R[j]) {
            arr[k] = L[i];
            i++;
        } else {
            arr[k] = R[j];
            j++;
        }
        k++;
    }

    /* Copy any remaining elements of L[] */
    while (i < n1) {
        arr[k] = L[i];
        i++;
        k++;
    }

    /* Copy any remaining elements of R[] */
    while (j < n2) {
        arr[k] = R[j];
        j++;
        k++;
    }
}

/* Recursive top-down merge sort of arr[l..r]. */
void mergeSort(int arr[], int l, int r)
{
    if (l < r) {
        /* Same as (l+r)/2, but avoids overflow for large l and r */
        int m = l + (r - l) / 2;

        mergeSort(arr, l, m);
        mergeSort(arr, m + 1, r);
        merge(arr, l, m, r);
    }
}

/* Driver: read 10000 ints from ../../in.txt, sort them, and write the
 * result to out.txt.  Returns nonzero if a file cannot be opened. */
int main()
{
    int arr_size = 10000;
    int arr[arr_size];
    int buff = 0;

    /* Read file.  BUGFIX: fopen results were previously unchecked and
     * a missing input file crashed the program. */
    FILE *fp = fopen("../../in.txt", "r");
    if (fp == NULL) {
        fprintf(stderr, "cannot open ../../in.txt\n");
        return 1;
    }
    fscanf(fp, "%d", &buff);
    for (int x = 0; x < arr_size; x++) {
        arr[x] = buff;
        fscanf(fp, "%d", &buff);
    }
    fclose(fp);

    mergeSort(arr, 0, arr_size - 1);

    /* out.txt */
    FILE *fp2 = fopen("out.txt", "w+");
    if (fp2 == NULL) {
        fprintf(stderr, "cannot open out.txt\n");
        return 1;
    }
    for (int x = 0; x < arr_size; x++) {
        fprintf(fp2, "%d\n", arr[x]);
    }
    fclose(fp2);

    return 0;
}
nanoBragg_standalone.c
/* perfect-lattice nanocrystal diffraction simulator -James Holton and Ken Frankel 2-3-18 example: gcc -O3 -o nanoBragg nanoBragg.c -lm -fopenmp ./nanoBragg -mat auto.mat -hkl P1.hkl -distance 2500 ./nanoBragg -mat A.mat -hkl P1.hkl -lambda 1 -dispersion 0.1 -dispstep 3 -distance 100 -detsize 100 -pixel 0.1 \ -hdiv 0.28 -hdivstep 0.02 -vdiv 0.28 -vdivstep 0.02 \ -fluence 1e24 -N 0 \ -water 0 ./nanoBragg -cell 74 74 36 90 90 90 -misset 10 20 30 \ -hkl P1.hkl -lambda 1 -dispersion 0.1 -dispstep 3 -distance 100 -detsize 100 -pixel 0.1 \ -hdiv 0.28 -hdivstep 0.02 -vdiv 0.28 -vdivstep 0.02 \ -fluence 1e24 -N 0 \ -water 0 lattice positions and wavelength (lambda) should be provided in Angstrom, three numbers per line detector distance, detsize and pixel size in mm divergence in mrad dispersion in percent phi and osc are in degrees fluence is in photons/meter^2 (integrated exposure time) Na, Nb, Nc, are the number of unit cells along the a,b,c axes, respectively note that any of Na,Nb,Nc can be zero to simulate an isolated unit cell (SAXS) water is the thickness in microns of "water" also traversed by the beam this generates a simplitic background: that from a material with density 1.0 and isotropic structure factor of 2.57 electrons (the forward-scattered structure factor of water more complicated backgrounds can be made in a separate run of this program using Na=Nb=Nc=0. auto.mat can be an orientation matrix from MOSFLM, or simply a text file of the three reciprocal lattice vector components along x,y,z: a_star_x b_star_x c_star_x a_star_y b_star_y c_star_y a_star_z b_star_z c_star_z you can also simply specify the unit cell with -cell and some miss-setting angles with -misset P1.hkl should be a text file containing h k l F for EVERY spot that has an intensity (including F000). No symmetry operators will be imposed by this program. Not even Friedel symmetry. 
Since reading the HKL file can often be the slowest step, this program will create a binary "dumpfile" in the current working directory that it will re-read upon subsequent runs if -hkl is not specified. Please note that unlike nearBragg, this program does not work in the near field, so detector distances should always be much larger than the crystal size */ #define _USE_MATH_DEFINES #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <math.h> #include <time.h> #include <limits.h> #include <float.h> #ifndef NAN #define NAN strtod("NAN",NULL) #endif #define TRUE 1 #define FALSE 0 #define Avogadro 6.02214179e23 /* read in text file into double arrays at provided addresses */ size_t read_text_file(char *filename, size_t nargs, ... ); /* cubic spline interpolation functions */ void polint(double *xa, double *ya, double x, double *y); void polin2(double *x1a, double *x2a, double **ya, double x1,double x2, double *y); void polin3(double *x1a, double *x2a, double *x3a, double ***ya, double x1,double x2, double x3, double *y); /* rotate a 3-vector in space applied in order phix,phiy,phiz*/ double *rotate(double *v, double *newv, double phix, double phiy, double phiz); /* rotate a 3-vector about a unit vector axis */ double *rotate_axis(double *v, double *newv, double *axis, double phi); /* rotate a 3-vector using a 9-element unitary matrix */ double *rotate_umat(double *v, double *newv, double *umat); /* vector cross product where vector magnitude is 0th element */ double *cross_product(double *x, double *y, double *z); /* vector inner product where vector magnitude is 0th element */ double dot_product(double *x, double *y); /* compute difference between two vectors */ double vector_diff(double *vector, double *origin_vector, double *new_vector); /* measure magnitude of vector and put it in 0th element */ double magnitude(double *vector); /* scale the magnitude of a vector */ double vector_scale(double *vector, double *new_vector, double 
scale); /* force the magnitude of vector to given value */ double vector_rescale(double *vector, double *new_vector, double magnitude); /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ double unitize(double *vector, double *new_unit_vector); /* polarization factor from vectors */ double polarization_factor(double kahn_factor, double *incident, double *diffracted, double *axis); /* generate unit vector in random direction */ float uniform3Ddev(float *dx, float *dy, float *dz, long *idum); /* generate random unitary rotation matrix within a spherical cap */ double *mosaic_rotation_umat(float mosaicity, double umat[9], long *idum); /* convert unitary matrix into missetting angles */ double *umat2misset(double umat[9],double *missets); /* random deviate with Poisson distribution */ float poidev(float xm, long *idum); /* random deviate with Gaussian distribution */ float gaussdev(long *idum); /* random deviate with Lorentzian distribution */ float lorentzdev(long *idum); /* random deviate with triangle-shaped distribution */ float triangledev(long *idum); /* random deviate with exponential distribution (>0) */ float expdev(long *idum); /* random deviate with uniform distribution */ float ran1(long *idum); /* Fourier transform of a truncated lattice */ double sincg(double x, double N); /* Fourier transform of a sphere */ double sinc3(double x); /* Fourier transform of a spherically-truncated lattice */ double sinc_conv_sinc3(double x); /* file stuff */ char *matfilename = NULL; char *hklfilename = NULL; char *dumpfilename = "Fdump.bin\0"; char *stolfilename = NULL; char *imginfilename = NULL; char *maskfilename = NULL; char *stoloutfilename = "output.stol\0"; char *sourcefilename = NULL; char *floatfilename = "floatimage.bin\0"; //char *sinfilename = "sinimage.bin\0"; //char *cosfilename = "cosimage.bin\0"; char *intfilename = "intimage.img\0"; char *pgmfilename = "image.pgm\0"; char *noisefilename = 
"noiseimage.img\0"; FILE *infile = NULL; FILE *Fdumpfile = NULL; FILE *outfile = NULL; FILE *stoloutfile = NULL; typedef enum { SAMPLE, BEAM } pivot; typedef enum { SQUARE, ROUND, GAUSS, TOPHAT } shapetype; typedef enum { CUSTOM, ADXV, MOSFLM, XDS, DIALS, DENZO } convention; /* frame handling routines */ typedef struct _SMVinfo { char *filename; FILE *handle; int swap_bytes; int header_size; int width; int height; char *header; unsigned short int *mmapdata; } SMVinfo; /* SMV image handling routines */ SMVinfo GetFrame(char *filename); double ValueOf( const char *keyword, SMVinfo smvfile); char *get_byte_order(); unsigned char *read_pgm5_bytes(char *filename,unsigned int *returned_width,unsigned int *returned_height); int main(int argc, char** argv) { /* progress meter stuff */ long progress_pixel,progress_pixels; int progress_meter=1; int babble=1; int printout = 0; int printout_spixel=-1,printout_fpixel=-1; /* x-ray beam properties */ double beam_vector[4] = {0,1,0,0}; int coherent = 0; int far_source = 1; int round_div = 1; double lambda,*lambda_of; double mosaic_spread=-1.0,*mosaic_umats,mosaic_missets[4]; double umat[9]; double dispersion=0.0,dispstep=-1,lambda0 = 1.0e-10; double hdiv,hdivstep=-1.0,hdivrange= -1.0; double vdiv,vdivstep=-1.0,vdivrange= -1.0; double source_path,source_distance = 10.0; int divsteps=-1,hdivsteps=-1,vdivsteps=-1,dispsteps=-1; int hdiv_tic,vdiv_tic,disp_tic,mos_tic; int mosaic_domains=-1; double weight; int source,sources; double *source_X,*source_Y,*source_Z,*source_I,*source_lambda; /* Thomson cross section (m^2) */ double r_e_sqr = 7.94079248018965e-30; /* incident x-ray fluence in photons/m^2 */ double fluence = 125932015286227086360700780544.0; double flux=0.0,exposure=1.0,beamsize=1e-4; /* sample size stuff */ int N=1; double Na=1.0,Nb=1.0,Nc=1.0; double xtalsize_max,xtalsize_a,xtalsize_b,xtalsize_c; double reciprocal_pixel_size; shapetype xtal_shape = SQUARE; double hrad_sqr,fudge=1; double sample_x = 0; /* m */ double 
sample_y = 0; /* m */ double sample_z = 0; /* m */ double density = 1.0e6; /* g/m^3 */ double molecular_weight = 18.0; /* g/mol */ double volume=0.0,molecules = 0.0; /* scale factor = F^2*r_e_sqr*fluence*Avogadro*volume*density/molecular_weight m^2 ph/m^2 /mol m^3 g/m^3 g/mol */ double water_size = 0.0; double water_F = 2.57; double water_MW = 18.0; /* water F = 2.57 in forward direction */ /* detector stuff */ double pixel_size = 0.1e-3; double pixel_pos[4]; int fpixel,spixel,fpixels=0,spixels=0,pixels; double distance = 100.0e-3; double detsize_f = 102.4e-3; double detsize_s = 102.4e-3; double detector_mu=0.0,detector_thick=0.0,detector_thickstep=-1.0,parallax,capture_fraction; int detector_thicksteps=-1,thick_tic; double fdet_vector[4] = {0,0,0,1}; double sdet_vector[4] = {0,0,-1,0}; double odet_vector[4] = {0,1,0,0}; double pix0_vector[4] = {0,0,0,0}; double detector_rotx=0.0,detector_roty=0.0,detector_rotz=0.0; double twotheta_axis[4] = {0,0,1,0}; pivot detector_pivot = BEAM; convention beam_convention = MOSFLM; double detector_twotheta = 0.0; double airpath,omega_pixel,omega_Rsqr_pixel,omega_sum; int curved_detector = 0; int point_pixel= 0; /* beam center value that goes into the image header */ double Xbeam=NAN,Ybeam=NAN; /* direct beam coordinate on fast/slow pixel axes; used for diffraction if pivot=beam */ double Fbeam=NAN,Sbeam=NAN; double Fdet,Sdet,Odet; double Fdet0,Sdet0; /* nearest point on detector for detector at rotations=0 */ double Xclose=NAN,Yclose=NAN,close_distance=NAN; /* near point in fast/slow pixel units; used for diffraction if pivot=sample */ double Fclose=NAN,Sclose=NAN; /* fast/slow near-point position in pixels */ double ORGX=NAN,ORGY=NAN; /* similar to pix0,vector but with dials-default vectors */ double dials_origin[4] = {0,0,0,0}; double adc_offset = 40.0; /* scattering vectors */ double incident[4]; double diffracted[4],diffracted0[4]; double scattering[4]; double stol,twotheta,theta; /* diffraction geometry stuff */ double 
costwotheta,sintwotheta,psi=0; double xd,yd,zd,xd0,yd0,zd0; double Ewald[4],Ewald0[4],relp[4]; double dmin=0; int integral_form = 0; /* polarization stuff */ double polar_vector[4] = {0,0,0,1}; double vert_vector[4]; double polar=1.0,polarization=0.0; int nopolar = 0; /* sampling */ int steps; int roi_xmin=-1,roi_xmax=-1,roi_ymin=-1,roi_ymax=-1; int oversample = -1,recommended_oversample,subS,subF; int oversample_thick = 0; int oversample_polar = 0; int oversample_omega = 0; double subpixel_size; /* spindle */ double phi,phi0=0.0,phistep=-1.0,osc=-1.0; int phi_tic,phisteps=-1; double spindle_vector[4] = {0,0,0,1}; /* structure factor representation */ double phase,Fa,Fb; double F,F_bg,*stol_of,*F_of; double ***Fhkl; double default_F=0.0; int hkls=0; double F_latt,F_cell; double F_highangle,F_lowangle; int stols,nearest=0; double stol_file_mult=1.0e10; double denom; /* intensity stats */ double I,I_bg; double max_I = 0.0; double max_I_x = 0.0,max_I_y = 0.0; double intfile_scale = 0.0; double pgm_scale = 0.0; double sum,sumsqr,avg,rms,rmsd; int sumn = 0; int overloads = 0; /* image file data */ float *floatimage; int imgidx; SMVinfo maskfile; unsigned short int *maskimage = NULL; // float *sinimage; // float *cosimage; unsigned short int *intimage = NULL; unsigned char *pgmimage = NULL; char *byte_order = get_byte_order(); SMVinfo imginfile; float *imginfileimage = NULL; /* misc variables */ int i,j,n; double X,Y,Z; double ratio,r; double X0,Y0,Z0,d_r; double RTD=180.0*M_1_PI; double test; double vector[4]; double newvector[4]; long seed; seed = -time((time_t *)0); // printf("random number seed = %u\n",seed); long mosaic_seed = -12345678; /* interpolation arrays */ int interpolate = 2; double ***sub_Fhkl; int h_interp[5],k_interp[5],l_interp[5]; double h_interp_d[5],k_interp_d[5],l_interp_d[5]; double h,k,l; int h0,k0,l0,h_range,k_range,l_range,h_min,h_max,k_min,k_max,l_min,l_max; int h0_flr,k0_flr,l0_flr; int i1=0, i2=0, i3=0; /* unit cell stuff */ int user_cell = 
0; double a[4] = {0,0,0,0}; double b[4] = {0,0,0,0}; double c[4] = {0,0,0,0}; double a0[4],b0[4],c0[4]; double ap[4],bp[4],cp[4]; double alpha=0.0,beta=0.0,gamma=0.0; double a_star[4],b_star[4],c_star[4]; double a_star0[4],b_star0[4],c_star0[4]; double alpha_star,beta_star,gamma_star; double a_cross_b[4],b_cross_c[4],c_cross_a[4]; double a_star_cross_b_star[4],b_star_cross_c_star[4],c_star_cross_a_star[4]; double V_cell,V_star,skew,aavg; double sin_alpha,sin_beta,sin_gamma; double cos_alpha,cos_beta,cos_gamma; double sin_alpha_star,sin_beta_star,sin_gamma_star; double cos_alpha_star,cos_beta_star,cos_gamma_star; double misset[4] = {0,0,0,0}; /* special options */ int calculate_noise = 1; int write_pgm = 1; /* check argument list */ for(i=1; i<argc; ++i) { if(argv[i][0] == '-') { /* option specified */ if(strstr(argv[i], "-img") && (argc > (i+1))) { imginfilename = argv[i+1]; } if(strstr(argv[i], "-mask") && (argc > (i+1))) { maskfilename = argv[i+1]; } } } /* read in any provided mask file */ if(maskfilename != NULL) { /* frame handling routines */ maskfile = GetFrame(maskfilename); if(maskfile.header_size > 0) { fpixels = maskfile.width; spixels = maskfile.height; pixels = fpixels*spixels; test = ValueOf("PIXEL_SIZE",maskfile); if(! isnan(test)) pixel_size = test/1000.0; detsize_f = pixel_size*fpixels; detsize_s = pixel_size*spixels; test = ValueOf("DISTANCE",maskfile); if(! isnan(test)) distance = test/1000.0; test = ValueOf("CLOSE_DISTANCE",maskfile); if(! isnan(test)) close_distance = test/1000.0; test = ValueOf("WAVELENGTH",maskfile); if(! isnan(test)) lambda0 = test/1e10; test = ValueOf("BEAM_CENTER_X",maskfile); if(! isnan(test)) Xbeam = test/1000.0; test = ValueOf("BEAM_CENTER_Y",maskfile); if(! isnan(test)) Ybeam = detsize_s - test/1000.0; test = ValueOf("ORGX",maskfile); if(! isnan(test)) ORGX = test; test = ValueOf("ORGY",maskfile); if(! isnan(test)) ORGY = test; test = ValueOf("PHI",maskfile); if(! 
isnan(test)) phi0 = test/RTD; test = ValueOf("OSC_RANGE",maskfile); if(! isnan(test)) osc = test/RTD; test = ValueOf("TWOTHETA",maskfile); if(! isnan(test)) twotheta = test/RTD; maskimage = (unsigned short int*) calloc(pixels+10,sizeof(unsigned short int)); imgidx = maskfile.header_size / sizeof(unsigned short int); for(i=0;i<pixels;++i){ maskimage[i] = (float) maskfile.mmapdata[imgidx]; ++imgidx; } } } /* read in any provided img file (mostly for the header) */ if(imginfilename != NULL) { /* frame handling routines */ imginfile = GetFrame(imginfilename); if(imginfile.header_size > 0) { fpixels = imginfile.width; spixels = imginfile.height; pixels = fpixels*spixels; test = ValueOf("PIXEL_SIZE",imginfile); if(! isnan(test)) pixel_size = test/1000.0; detsize_f = pixel_size*fpixels; detsize_s = pixel_size*spixels; test = ValueOf("DISTANCE",imginfile); if(! isnan(test)) distance = test/1000.0; test = ValueOf("CLOSE_DISTANCE",imginfile); if(! isnan(test)) close_distance = test/1000.0; test = ValueOf("WAVELENGTH",imginfile); if(! isnan(test)) lambda0 = test/1e10; test = ValueOf("BEAM_CENTER_X",imginfile); if(! isnan(test)) Xbeam = test/1000.0; test = ValueOf("BEAM_CENTER_Y",imginfile); if(! isnan(test)) Ybeam = test/1000.0; test = ValueOf("ORGX",imginfile); if(! isnan(test)) ORGX = test; test = ValueOf("ORGY",imginfile); if(! isnan(test)) ORGY = test; test = ValueOf("PHI",imginfile); if(! isnan(test)) phi0 = test/RTD; test = ValueOf("OSC_RANGE",imginfile); if(! isnan(test)) osc = test/RTD; test = ValueOf("TWOTHETA",imginfile); if(! 
isnan(test)) twotheta = test/RTD; imginfileimage = (float *) calloc(pixels+10,sizeof(float)); imgidx = imginfile.header_size / sizeof(unsigned short int); for(i=0;i<pixels;++i){ imginfileimage[i] = (float) imginfile.mmapdata[imgidx]; ++imgidx; } } } /* check argument list for options */ for(i=1; i<argc; ++i) { if(argv[i][0] == '-') { /* option specified */ if(strstr(argv[i], "-Na") && (argc > (i+1))) { Na = atoi(argv[i+1]); continue; } if(strstr(argv[i], "-Nb") && (argc > (i+1))) { Nb = atoi(argv[i+1]); continue; } if(strstr(argv[i], "-Nc") && (argc > (i+1))) { Nc = atoi(argv[i+1]); continue; } if(0==strcmp(argv[i], "-N") && (argc > (i+1))) { Na = Nb = Nc = atoi(argv[i+1]); continue; } if(strstr(argv[i], "-cell") && (argc > (i+1))) { user_cell = 1; if(argc <= (i+1)) continue; if(argv[i+1][0] == '-') continue; a[0] = atof(argv[i+1]); if(argc <= (i+2)) continue; if(argv[i+2][0] == '-') continue; b[0] = atof(argv[i+2]); if(argc <= (i+3)) continue; if(argv[i+3][0] == '-') continue; c[0] = atof(argv[i+3]); if(argc <= (i+4)) continue; if(argv[i+4][0] == '-') continue; alpha = atof(argv[i+4])/RTD; if(argc <= (i+5)) continue; if(argv[i+5][0] == '-') continue; beta = atof(argv[i+5])/RTD; if(argc <= (i+6)) continue; if(argv[i+6][0] == '-') continue; gamma = atof(argv[i+6])/RTD; } if(strstr(argv[i], "-misset") && (argc > (i+1))) { if(strstr(argv[i+1],"rand")) { misset[0] = -1; continue; } } if(strstr(argv[i], "-misset") && (argc > (i+3))) { misset[0] = 1; misset[1] = atof(argv[i+1])/RTD; misset[2] = atof(argv[i+2])/RTD; misset[3] = atof(argv[i+3])/RTD; } if((strstr(argv[i], "-samplesize") || strstr(argv[i], "-sample_size")) && (argc > (i+1))) { sample_x = atof(argv[i+1])/1000; sample_y = atof(argv[i+1])/1000; sample_z = atof(argv[i+1])/1000; } if((strstr(argv[i], "-sample_thick") || strstr(argv[i], "-sample_x") ) && (argc > (i+1))) { sample_x = atof(argv[i+1])/1000; } if((strstr(argv[i], "-sample_width") || strstr(argv[i], "-sample_y") || strstr(argv[i], "-width")) && (argc > 
(i+1))) { sample_y = atof(argv[i+1])/1000; } if((strstr(argv[i], "-sample_heigh") || strstr(argv[i], "-sample_z") || strstr(argv[i], "-heigh")) && (argc > (i+1))) { sample_z = atof(argv[i+1])/1000; } if((strstr(argv[i], "-xtalsize") || strstr(argv[i], "-xtal_size")) && (argc > (i+1))) { sample_x = atof(argv[i+1])/1000; sample_y = atof(argv[i+1])/1000; sample_z = atof(argv[i+1])/1000; } if((strstr(argv[i], "-xtal_thick") || strstr(argv[i], "-xtal_x") ) && (argc > (i+1))) { sample_x = atof(argv[i+1])/1000; } if((strstr(argv[i], "-xtal_width") || strstr(argv[i], "-xtal_y") || strstr(argv[i], "-width")) && (argc > (i+1))) { sample_y = atof(argv[i+1])/1000; } if((strstr(argv[i], "-xtal_heigh") || strstr(argv[i], "-xtal_z") || strstr(argv[i], "-heigh")) && (argc > (i+1))) { sample_z = atof(argv[i+1])/1000; } if((strstr(argv[i], "-density") || strstr(argv[i], "-sample_den")) && (argc > (i+1))) { density = atof(argv[i+1])*1e6; } if((0==strcmp(argv[i], "-MW") || strstr(argv[i], "-molec")) && (argc > (i+1))) { molecular_weight = atof(argv[i+1]); } if(strstr(argv[i], "-Xbeam") && (argc > (i+1))) { Xbeam = atof(argv[i+1])/1000.0; detector_pivot = BEAM; } if(strstr(argv[i], "-Ybeam") && (argc > (i+1))) { Ybeam = atof(argv[i+1])/1000.0; detector_pivot = BEAM; } if(strstr(argv[i], "-Xclose") && (argc > (i+1))) { Xclose = atof(argv[i+1])/1000.0; detector_pivot = SAMPLE; } if(strstr(argv[i], "-Yclose") && (argc > (i+1))) { Yclose = atof(argv[i+1])/1000.0; detector_pivot = SAMPLE; } if(strstr(argv[i], "-ORGX") && (argc > (i+1))) { ORGX = atof(argv[i+1]); detector_pivot = SAMPLE; } if(strstr(argv[i], "-ORGY") && (argc > (i+1))) { ORGY = atof(argv[i+1]); detector_pivot = SAMPLE; } if(strstr(argv[i], "-pivot") && (argc > (i+1))) { if(strstr(argv[i+1], "sample")) detector_pivot = SAMPLE; if(strstr(argv[i+1], "beam")) detector_pivot = BEAM; } if(strstr(argv[i], "-mosflm")) { beam_convention = MOSFLM; detector_pivot = BEAM; } if(strstr(argv[i], "-xds")) { beam_convention = XDS; 
detector_pivot = SAMPLE; } if(strstr(argv[i], "-adxv")) { beam_convention = ADXV; detector_pivot = BEAM; } if(strstr(argv[i], "-denzo")) { beam_convention = DENZO; detector_pivot = BEAM; } if(strstr(argv[i], "-dials")) { beam_convention = DIALS; detector_pivot = BEAM; } if(strstr(argv[i], "-fdet_vector") && (argc > (i+3))) { beam_convention = CUSTOM; fdet_vector[1] = atof(argv[i+1]); fdet_vector[2] = atof(argv[i+2]); fdet_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-sdet_vector") && (argc > (i+3))) { beam_convention = CUSTOM; sdet_vector[1] = atof(argv[i+1]); sdet_vector[2] = atof(argv[i+2]); sdet_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-odet_vector") && (argc > (i+3))) { beam_convention = CUSTOM; odet_vector[1] = atof(argv[i+1]); odet_vector[2] = atof(argv[i+2]); odet_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-beam_vector") && (argc > (i+3))) { beam_convention = CUSTOM; beam_vector[1] = atof(argv[i+1]); beam_vector[2] = atof(argv[i+2]); beam_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-polar_vector") && (argc > (i+3))) { beam_convention = CUSTOM; polar_vector[1] = atof(argv[i+1]); polar_vector[2] = atof(argv[i+2]); polar_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-spindle_axis") && (argc > (i+3))) { beam_convention = CUSTOM; spindle_vector[1] = atof(argv[i+1]); spindle_vector[2] = atof(argv[i+2]); spindle_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-twotheta_axis") && (argc > (i+3))) { beam_convention = CUSTOM; twotheta_axis[1] = atof(argv[i+1]); twotheta_axis[2] = atof(argv[i+2]); twotheta_axis[3] = atof(argv[i+3]); } if(strstr(argv[i], "-pix0_vector") && (argc > (i+3))) { beam_convention = CUSTOM; pix0_vector[0] = 1.0; pix0_vector[1] = atof(argv[i+1]); pix0_vector[2] = atof(argv[i+2]); pix0_vector[3] = atof(argv[i+3]); } if(strstr(argv[i], "-distance") && (argc > (i+1))) { distance = atof(argv[i+1])/1000.0; detector_pivot = BEAM; } if(strstr(argv[i], "-close_distance") && (argc > (i+1))) { close_distance = 
atof(argv[i+1])/1000.0; detector_pivot = SAMPLE; } // if(strstr(argv[i], "-source_dist") && (argc > (i+1))) // { // source_distance = atof(argv[i+1])/1000.0; // } if(strstr(argv[i], "-detector_abs") && (argc >= (i+1))) { if(strstr(argv[i+1], "inf") || atof(argv[i+1]) == 0.0) { detector_thick = 0.0; detector_mu = 0.0; }else{ detector_mu = 1.0/(atof(argv[i+1])*1e-6); } } if(strstr(argv[i], "-detector_thick") && (strlen(argv[i]) == 15) && (argc >= (i+1))) { detector_thick = atof(argv[i+1])*1e-6; } if(strstr(argv[i], "-detector_thicksteps") && (argc >= (i+1))) { detector_thicksteps = atoi(argv[i+1]); } if(strstr(argv[i], "-thicksteps") && (argc >= (i+1))) { detector_thicksteps = atoi(argv[i+1]); } if(strstr(argv[i], "-twotheta") && (argc > (i+1))) { detector_twotheta = atof(argv[i+1])/RTD; detector_pivot = SAMPLE; } if(strstr(argv[i], "-detector_rotx") && (argc > (i+1))) { detector_rotx = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-detector_roty") && (argc > (i+1))) { detector_roty = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-detector_rotz") && (argc > (i+1))) { detector_rotz = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-detsize") && (strlen(argv[i]) == 8) && (argc > (i+1))) { detsize_f = atof(argv[i+1])/1000.0; detsize_s = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-detsize_f") && (argc > (i+1))) { detsize_f = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-detsize_s") && (argc > (i+1))) { detsize_s = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-detpixels") && (strlen(argv[i]) == 10) && (argc > (i+1))) { fpixels = spixels = atoi(argv[i+1]); } if(strstr(argv[i], "-detpixels_f") && (argc > (i+1))) { fpixels = atoi(argv[i+1]); } if(strstr(argv[i], "-detpixels_s") && (argc > (i+1))) { spixels = atoi(argv[i+1]); } if(strstr(argv[i], "-curved_det") && (argc > (i+1))) { curved_detector = 1; } if(strstr(argv[i], "-pixel") && (argc > (i+1))) { pixel_size = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-point_pixel") ) { point_pixel = 1; } if(strstr(argv[i], "-polar") && 
(strlen(argv[i]) == 6) && (argc > (i+1))) { polarization = atof(argv[i+1]); nopolar = 0; } if(strstr(argv[i], "-nopolar") ) { nopolar = 1; } if(strstr(argv[i], "-oversample_thick") ) { oversample_thick = 1; continue; } if(strstr(argv[i], "-oversample_polar") ) { oversample_polar = 1; continue; } if(strstr(argv[i], "-oversample_omega") ) { oversample_omega = 1; continue; } if(strstr(argv[i], "-oversample") && (argc > (i+1))) { oversample = atoi(argv[i+1]); } if(strstr(argv[i], "-roi") && (argc > (i+4))) { roi_xmin = atoi(argv[i+1]); roi_xmax = atoi(argv[i+2]); roi_ymin = atoi(argv[i+3]); roi_ymax = atoi(argv[i+4]); } if((strstr(argv[i], "-lambda") || strstr(argv[i], "-wave")) && (argc > (i+1))) { lambda0 = atof(argv[i+1])/1.0e10; } if(strstr(argv[i], "-energy") && (argc > (i+1))) { lambda0 = (12398.42/atof(argv[i+1]))/1.0e10; } if(strstr(argv[i], "-fluence") && (argc > (i+1))) { fluence = atof(argv[i+1]); } if(strstr(argv[i], "-flux") && (argc > (i+1))) { flux = atof(argv[i+1]); } if(strstr(argv[i], "-exposure") && (argc > (i+1))) { exposure = atof(argv[i+1]); } if(strstr(argv[i], "-beamsize") && (argc > (i+1))) { beamsize = atof(argv[i+1])/1000; } if((strstr(argv[i], "-mosaic") && (strlen(argv[i]) == 7) || strstr(argv[i], "-mosaici") || strstr(argv[i], "-mosaic_spr")) && (argc > (i+1))) { mosaic_spread = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-mosaic_dom") && (argc > (i+1))) { mosaic_domains = atoi(argv[i+1]); } if(strstr(argv[i], "-dispersion") && (argc > (i+1))) { dispersion = atof(argv[i+1])/100.0; } if(strstr(argv[i], "-dispsteps") && (argc > (i+1))) { dispsteps = atoi(argv[i+1]); } if(strstr(argv[i], "-divergence") && (argc > (i+1))) { hdivrange = vdivrange = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-hdivrange") && (argc > (i+1))) { hdivrange = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-vdivrange") && (argc > (i+1))) { vdivrange = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-hdivstep") && (strlen(argv[i]) == 9) && (argc > (i+1))) { hdivstep = 
atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-hdivsteps") && (argc > (i+1))) { hdivsteps = atoi(argv[i+1]); } if(strstr(argv[i], "-vdivstep") && (strlen(argv[i]) == 9) && (argc > (i+1))) { vdivstep = atof(argv[i+1])/1000.0; } if(strstr(argv[i], "-vdivsteps") && (argc > (i+1))) { vdivsteps = atoi(argv[i+1]); } if(strstr(argv[i], "-divsteps") && (argc > (i+1))) { hdivsteps = vdivsteps = atoi(argv[i+1]); } if(strstr(argv[i], "-round_div") ) { /* cut to circle */ round_div = 1; } if(strstr(argv[i], "-square_div") ) { /* just raster */ round_div = 0; } if(strstr(argv[i], "-adc") && (argc > (i+1))) { adc_offset = atof(argv[i+1]); } if(strstr(argv[i], "-phi") && strlen(argv[i])==4 && (argc > (i+1))) { phi0 = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-osc") && (argc > (i+1))) { osc = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-phistep") && strlen(argv[i])==8 && (argc > (i+1))) { phistep = atof(argv[i+1])/RTD; } if(strstr(argv[i], "-phisteps") && (argc > (i+1))) { phisteps = atoi(argv[i+1]); } if(strstr(argv[i], "-dmin") && (argc > (i+1))) { dmin = atof(argv[i+1])*1e-10; } if(strstr(argv[i], "-mat") && (argc > (i+1))) { matfilename = argv[i+1]; } if(strstr(argv[i], "-hkl") && (argc > (i+1))) { hklfilename = argv[i+1]; } if(strstr(argv[i], "-default_F") && (argc > (i+1))) { default_F = atof(argv[i+1]); } if(strstr(argv[i], "-img") && (argc > (i+1))) { imginfilename = argv[i+1]; } if(strstr(argv[i], "-stolout") && strlen(argv[i])>7 && (argc > (i+1))) { stoloutfilename = argv[i+1]; } if(strstr(argv[i], "-stol") && strlen(argv[i])==5 && (argc > (i+1))) { stolfilename = argv[i+1]; stol_file_mult = 1e10; } if(strstr(argv[i], "-4stol") && strlen(argv[i])==6 && (argc > (i+1))) { stolfilename = argv[i+1]; stol_file_mult = 1e10/4; } if(strstr(argv[i], "-Q") && strlen(argv[i])==2 && (argc > (i+1))) { stolfilename = argv[i+1]; stol_file_mult = 1e10/M_PI/4; } if(strstr(argv[i], "-sourcefile") && (argc > (i+1))) { sourcefilename = argv[i+1]; } if((strstr(argv[i], "-floatfile") || 
strstr(argv[i], "-floatimage")) && (argc > (i+1))) { floatfilename = argv[i+1]; } if((strstr(argv[i], "-intfile") || strstr(argv[i], "-intimage")) && (argc > (i+1))) { intfilename = argv[i+1]; } if((strstr(argv[i], "-pgmfile") || strstr(argv[i], "-pgmimage")) && (argc > (i+1))) { pgmfilename = argv[i+1]; write_pgm = 1; } if((strstr(argv[i], "-noisefile") || strstr(argv[i], "-noiseimage")) && (argc > (i+1))) { noisefilename = argv[i+1]; calculate_noise = 1; } if(strstr(argv[i], "-nonoise") ) { /* turn off noise */ calculate_noise = 0; } if(strstr(argv[i], "-nopgm") ) { write_pgm = 0; } if(strstr(argv[i], "-scale") && (argc > (i+1))) { /* specify the scale for the intfile */ intfile_scale = atof(argv[i+1]); } if(strstr(argv[i], "-pgmscale") && (argc > (i+1))) { /* specify the scale for the intfile */ pgm_scale = atof(argv[i+1]); write_pgm = 1; } if(strstr(argv[i], "-coherent") ) { /* turn off incoherent addition */ coherent = 1; } if(strstr(argv[i], "-printout") ) { /* turn on console printing */ printout = 1; } if(strstr(argv[i], "-noprogress") ) { /* turn off progress meter */ progress_meter = 0; } if(strstr(argv[i], "-progress") ) { /* turn on progress meter */ progress_meter = 1; } if(strstr(argv[i], "-interpolate") ) { /* turn on tricubic interpolation */ interpolate = 1; } if(strstr(argv[i], "-nointerpolate") ) { /* turn off tricubic interpolation */ interpolate = 0; } if(strstr(argv[i], "-round_xtal") ) { /* use sinc3 */ xtal_shape = ROUND; } if(strstr(argv[i], "-square_xtal") ) { /* use sincg */ xtal_shape = SQUARE; } if(strstr(argv[i], "-gauss_xtal") ) { /* use Gaussian */ xtal_shape = GAUSS; } if(strstr(argv[i], "-binary_spots") || strstr(argv[i], "-tophat_spots")) { /* top hat */ xtal_shape = TOPHAT; } if(strstr(argv[i], "-fudge") && (argc > (i+1))) { fudge = atof(argv[i+1]); } if(strstr(argv[i], "-printout_pixel") && (argc > (i+2))) { printout_fpixel = atoi(argv[i+1]); printout_spixel = atoi(argv[i+2]); } if(strstr(argv[i], "-seed") && (argc > (i+1))) { 
seed = -atoi(argv[i+1]); } if(strstr(argv[i], "-mosaic_seed") && (argc > (i+1))) { mosaic_seed = -atoi(argv[i+1]); } if(strstr(argv[i], "-water") && (argc > (i+1))) { water_size = atof(argv[i+1])/1e6; } } } /* fill in blanks */ if(fpixels) { detsize_f = pixel_size*fpixels; } if(spixels) { detsize_s = pixel_size*spixels; } fpixels = ceil(detsize_f/pixel_size-0.5); spixels = ceil(detsize_s/pixel_size-0.5); pixels = fpixels*spixels; /* get fluence from flux */ if(flux != 0.0 && exposure > 0.0 && beamsize >= 0){ fluence = flux*exposure/beamsize/beamsize; } if(beamsize >= 0){ if(beamsize < sample_y){ printf("WARNING: clipping sample (%lg m high) with beam (%lg m)\n",sample_y,beamsize); sample_y = beamsize; } if(beamsize < sample_z){ printf("WARNING: clipping sample (%lg m wide) with beam (%lg m)\n",sample_z,beamsize); sample_z = beamsize; } } if(exposure > 0.0) { /* make sure flux is consistent with everything else */ flux = fluence/exposure*beamsize*beamsize; } /* straighten up sample properties */ // volume = sample_x*sample_y*sample_z; // molecules = volume*density*Avogadro/molecular_weight; /* defaults? */ if(! isnan(ORGX)) Fclose = (ORGX-0.5)*pixel_size; if(! 
isnan(ORGY)) Sclose = (ORGY-0.5)*pixel_size; /* place beam center halfway between four middle pixels */ /* place beam center at int(npix/2) location */ if(isnan(Fclose)) Fclose = (detsize_f - 0*pixel_size)/2.0; if(isnan(Sclose)) Sclose = (detsize_s + 0*pixel_size)/2.0; if(isnan(Xclose)) Xclose = Fclose; if(isnan(Yclose)) Yclose = Sclose; if(isnan(Fbeam)) Fbeam = Fclose; if(isnan(Sbeam)) Sbeam = Sclose; if(roi_xmin < 0) roi_xmin = 0; if(roi_xmax < 0) roi_xmax = fpixels; if(roi_ymin < 0) roi_ymin = 0; if(roi_ymax < 0) roi_ymax = spixels; progress_pixels = (roi_xmax-roi_xmin+1)*(roi_ymax-roi_ymin+1); if(beam_convention == ADXV) { /* first pixel is at 0,0 pix and pixel_size,pixel_size*npixels mm */ if(isnan(Xbeam)) Xbeam = (detsize_f + pixel_size)/2.0; if(isnan(Ybeam)) Ybeam = (detsize_s - pixel_size)/2.0; beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1; fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0; sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0; odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1; twotheta_axis[1]= -1; twotheta_axis[2]= 0; twotheta_axis[3]= 0; polar_vector[1]= 1; polar_vector[2]= 0; polar_vector[3]= 0; spindle_vector[1]= 1; spindle_vector[2]= 0; spindle_vector[3]= 0; Fbeam = Xbeam; Sbeam = detsize_s - Ybeam; detector_pivot = BEAM; } if(beam_convention == MOSFLM) { /* first pixel is at 0.5,0.5 pix and pixel_size/2,pixel_size/2 mm */ if(isnan(Xbeam)) Xbeam = (detsize_s + pixel_size)/2.0; if(isnan(Ybeam)) Ybeam = (detsize_f + pixel_size)/2.0; beam_vector[1]= 1; beam_vector[2]= 0; beam_vector[3]= 0; odet_vector[1]= 1; odet_vector[2]= 0; odet_vector[3]= 0; fdet_vector[1]= 0; fdet_vector[2]= 0; fdet_vector[3]= 1; sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0; twotheta_axis[1]= 0; twotheta_axis[2]= 0; twotheta_axis[3]= -1; polar_vector[1]= 0; polar_vector[2]= 0; polar_vector[3]= 1; spindle_vector[1]= 0; spindle_vector[2]= 0; spindle_vector[3]= 1; Fbeam = Ybeam + 0.5*pixel_size; Sbeam = Xbeam + 0.5*pixel_size; 
detector_pivot = BEAM; } if(beam_convention == DENZO) { if(isnan(Xbeam)) Xbeam = (detsize_s + pixel_size)/2.0; if(isnan(Ybeam)) Ybeam = (detsize_f + pixel_size)/2.0; beam_vector[1]= 1; beam_vector[2]= 0; beam_vector[3]= 0; odet_vector[1]= 1; odet_vector[2]= 0; odet_vector[3]= 0; fdet_vector[1]= 0; fdet_vector[2]= 0; fdet_vector[3]= 1; sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0; twotheta_axis[1]= 0; twotheta_axis[2]= 0; twotheta_axis[3]= -1; polar_vector[1]= 0; polar_vector[2]= 0; polar_vector[3]= 1; spindle_vector[1]= 0; spindle_vector[2]= 0; spindle_vector[3]= 1; Fbeam = Ybeam + 0.0*pixel_size; Sbeam = Xbeam + 0.0*pixel_size; detector_pivot = BEAM; } if(beam_convention == XDS) { if(isnan(Xbeam)) Xbeam = Xclose; if(isnan(Ybeam)) Ybeam = Yclose; beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1; fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0; sdet_vector[1]= 0; sdet_vector[2]= 1; sdet_vector[3]= 0; odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1; twotheta_axis[1]= 1; twotheta_axis[2]= 0; twotheta_axis[3]= 0; polar_vector[1]= 1; polar_vector[2]= 0; polar_vector[3]= 0; spindle_vector[1]= 1; spindle_vector[2]= 0; spindle_vector[3]= 0; Fbeam = Xbeam; Sbeam = Ybeam; detector_pivot = SAMPLE; } if(beam_convention == DIALS) { if(isnan(Xbeam)) Xbeam = Xclose; if(isnan(Ybeam)) Ybeam = Yclose; beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1; fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0; sdet_vector[1]= 0; sdet_vector[2]= 1; sdet_vector[3]= 0; odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1; twotheta_axis[1]= 0; twotheta_axis[2]= 1; twotheta_axis[3]= 0; polar_vector[1]= 0; polar_vector[2]= 1; polar_vector[3]= 0; spindle_vector[1]= 0; spindle_vector[2]= 1; spindle_vector[3]= 0; Fbeam = Xbeam; Sbeam = Ybeam; detector_pivot = SAMPLE; } if(beam_convention == CUSTOM) { if(isnan(Xbeam)) Xbeam = Xclose; if(isnan(Ybeam)) Ybeam = Yclose; Fbeam = Xbeam; Sbeam = Ybeam; Fclose = Xbeam; Sclose = Ybeam; } /* straighten up 
vectors */ unitize(beam_vector,beam_vector); unitize(fdet_vector,fdet_vector); unitize(sdet_vector,sdet_vector); if(unitize(odet_vector,odet_vector) != 1.0) { printf("WARNING: auto-generating odet_vector\n"); cross_product(fdet_vector,sdet_vector,odet_vector); unitize(odet_vector,odet_vector); } unitize(polar_vector,polar_vector); unitize(spindle_vector,spindle_vector); cross_product(beam_vector,polar_vector,vert_vector); unitize(vert_vector,vert_vector); printf("nanoBragg nanocrystal diffraction simulator - James Holton and Ken Frankel 5-17-17\n"); if(hklfilename == NULL) { /* see if there are Fs from a previous run */ Fdumpfile = fopen(dumpfilename,"r"); if(Fdumpfile == NULL && default_F == 0.0) { printf("ERROR: no hkl file and no dump file to read."); } } if(hklfilename == NULL && Fdumpfile == NULL && default_F == 0.0 || matfilename == NULL && a[0] == 0.0){ printf("usage: nanoBragg -mat auto.mat -hkl Fs.hkl\n"); printf("options:\n");\ printf("\t-mat filename.mat\tmosflm-style matrix file containing three reciprocal unit cell vectors\n"); printf("\t-hkl filename.hkl\ttext file containing h, k, l and F for P1 unit cell\n"); printf("\t-distance \tdistance from origin to detector center in mm\n"); printf("\t-detsize \tdetector size in mm. may also use -detsize_f -detsize_s\n"); printf("\t-detpixels \tdetector size in pixels. may also use -detpixels_x -detpixels_y\n"); printf("\t-pixel \tdetector pixel size in mm.\n"); printf("\t-detector_absorb \tdetector sensor material attenuation depth (um) (default: \"inf\" to save time)\n"); printf("\t-detector_thick \tdetector sensor thickness (um)\n"); printf("\t-detector_thicksteps\tnumber of layers of detector sensor material. Default: 1\n"); printf("\t-Xbeam \timage fast coordinate of direct-beam spot (mm). (default: center)\n"); printf("\t-Ybeam \timage slow coordinate of direct-beam spot (mm). (default: center)\n"); printf("\t-mosflm \tuse MOSFLM's direct-beam convention. 
(default: adxv)\n"); printf("\t-xds \tuse XDS detector origin convention. (default: adxv)\n"); printf("\t-twotheta \trotation of detector about spindle axis (deg). (default: 0)\n"); printf("\t-N \tnumber of unit cells in all directions. may also use -Na -Nb or -Nc\n"); printf("\t-square_xtal \tspecify parallelpiped crystal shape (default)\n"); printf("\t-round_xtal \tspecify ellipsoidal crystal shape (sort of)\n"); printf("\t-tophat_spots \tclip lattice transform at fwhm: no inter-Bragg maxima\n"); printf("\t-oversample \tnumber of sub-pixels per pixel. use this if xtalsize/lambda > distance/pixel\n"); printf("\t-oversample_thick \tre-calculate thickness effect for sub-pixels (not the default)\n"); printf("\t-oversample_polar \tre-calculate polarization effect for sub-pixels (not the default)\n"); printf("\t-oversample_omega \tre-calculate solid-angle effect for sub-pixels (not the default)\n"); printf("\t-lambda \tincident x-ray wavelength in Angstrom. may also use -energy in eV\n"); printf("\t-mosaic \tisotropic mosaic spread in degrees (use 90 for powder)\n"); printf("\t-mosaic_domains \tnumber of randomly-oriented mosaic domains to render\n"); printf("\t-dispersion \tspectral dispersion: delta-lambda/lambda in percent\n"); printf("\t-dispsteps \tnumber of wavelengths in above range\n"); printf("\t-hdivrange \thorizontal angular spread of source points in mrad\n"); printf("\t-vdivrange \tvertical angular spread of source points in mrad\n"); printf("\t-hdivstep \tnumber of source points in the horizontal\n"); printf("\t-vdivstep \tnumber of source points in the vertical\n"); printf("\t-square_div \tfull divergence grid (default: round off corners)\n"); printf("\t-phi \tstarting rotation value about spindle axis in degrees\n"); printf("\t-osc \trotation range about spindle axis in degrees\n"); printf("\t-phisteps \tnumber of rotation steps to render\n"); printf("\t-water \tadd contribution of x microns of water surrounding crystal\n"); printf("\t-floatfile \tname 
of binary output file (4-byte floats)\n"); printf("\t-intfile \tname of noiseless smv-formatted output file (not on absolute scale by default)\n"); printf("\t-scale \tscale factor to apply to intfile (default: autoscale)\n"); printf("\t-noisefile \tname of photon-scale smv-formatted output file (with Poisson noise)\n"); printf("\t-roi \tonly render part of the image: xmin xmax ymin ymax\n"); printf("\t-printout \tprint pixel values out to the screen\n"); printf("\t-seed \tspecify random-number seed for noisefile\n"); printf("\t-fluence \tincident beam intensity for photon-counting statistics (photons/m^2)\n"); printf("\t-nonoise \tdisable generating the noisefile\n"); printf("\t-noprogress \tturn off the progress meter\n"); printf("\t-nopolar \tturn off the polarization correction\n"); printf("\t-nointerpolate \tdisable inter-Bragg peak structure factor interpolation\n"); printf("\t-interpolate \tforce inter-Bragg peak structure factor interpolation (default: on if < 3 cells wide)\n"); printf("\t-point_pixel \tturn off the pixel solid angle correction\n"); printf("\t-curved_det \tall pixels same distance from crystal\n"); printf("\t-fdet_vector \tunit vector of increasing fast-axis detector pixel coordinate (default: %g %g %g)\n",fdet_vector[1],fdet_vector[2],fdet_vector[3]); printf("\t-sdet_vector \tunit vector of increasing slow-axis detector pixel coordinate (default: %g %g %g)\n",sdet_vector[1],sdet_vector[2],sdet_vector[3]); printf("\t-odet_vector \tunit vector of increasing detector distance (default: %g %g %g)\n",odet_vector[1],odet_vector[2],odet_vector[3]); printf("\t-beam_vector \tunit vector of x-ray beam direction (default: %g %g %g)\n",beam_vector[1],beam_vector[2],beam_vector[3]); printf("\t-polar_vector \tunit vector of x-ray E-vector polarization (default: %g %g %g)\n",polar_vector[1],polar_vector[2],polar_vector[3]); printf("\t-spindle_axis \tunit vector of right-handed phi rotation axis (default: %g %g 
%g)\n",spindle_vector[1],spindle_vector[2],spindle_vector[3]); printf("\t-pix0_vector \tvector from crystal to first pixel in image (default: beam centered on detector)\n"); // printf("\t-source_distance \tdistance of x-ray source from crystal (default: 10 meters)\n"); exit(9); } /* allocate detector memory */ floatimage = (float*) calloc(pixels+10,sizeof(float)); //sinimage = (float*) calloc(pixels+10,2*sizeof(float)); //cosimage = (float*) calloc(pixels+10,2*sizeof(float)); intimage = (unsigned short int*) calloc(pixels+10,sizeof(unsigned short int)); if(write_pgm) pgmimage = (unsigned char*) calloc(pixels+10,sizeof(unsigned char)); /* default sampling logic */ if(phisteps < 0){ /* auto-select number of phi steps */ if(osc < 0.0) { /* auto-select osc range */ if(phistep <= 0.0) { /* user doesn't care about anything */ phisteps = 1; osc = 0.0; phistep = 0.0; } else { /* user doesn't care about osc or steps, but specified step */ osc = phistep; phisteps = 2; } } else { /* user-speficied oscillation */ if(phistep <= 0.0) { /* osc specified, but nothing else */ phisteps = 2; phistep = osc/2.0; } else { /* osc and phi step specified */ phisteps = ceil(osc/phistep); } } } else { /* user-specified number of phi steps */ if(phisteps == 0) phisteps = 1; if(osc < 0.0) { /* auto-select osc range */ if(phistep <= 0.0) { /* user cares only about number of steps */ osc = 1.0/RTD; phistep = osc/phisteps; } else { /* user doesn't care about osc, but specified step */ osc = phistep; phisteps = 2; } } else { /* user-speficied oscillation */ if(phistep < 0.0) { /* osc and steps specified */ phistep = osc/phisteps; } else { /* everything specified */ } } } if(hdivsteps <= 0){ /* auto-select number of steps */ if(hdivrange < 0.0) { /* auto-select range */ if(hdivstep <= 0.0) { /* user doesn't care about anything */ hdivsteps = 1; hdivrange = 0.0; hdivstep = 0.0; } else { /* user specified stepsize and nothing else */ hdivrange = hdivstep; hdivsteps = 2; } } else { /* user-speficied 
range */ if(hdivstep <= 0.0) { /* range specified, but nothing else */ hdivstep = hdivrange; hdivsteps = 2; } else { /* range and step specified, but not number of steps */ hdivsteps = ceil(hdivrange/hdivstep); } } } else { /* user-specified number of steps */ if(hdivrange < 0.0) { /* auto-select range */ if(hdivstep <= 0.0) { /* user cares only about number of steps */ hdivrange = 1.0; hdivstep = hdivrange/hdivsteps; } else { /* user doesn't care about range */ hdivrange = hdivstep; hdivsteps = 2; } } else { /* user-speficied range */ if(hdivstep <= 0.0) { /* range and steps specified */ if(hdivsteps <=1 ) hdivsteps = 2; hdivstep = hdivrange/(hdivsteps-1); } else { /* everything specified */ } } } if(vdivsteps <= 0){ /* auto-select number of steps */ if(vdivrange < 0.0) { /* auto-select range */ if(vdivstep <= 0.0) { /* user doesn't care about anything */ vdivsteps = 1; vdivrange = 0.0; vdivstep = 0.0; } else { /* user specified stepsize and nothing else */ vdivrange = vdivstep; vdivsteps = 2; } } else { /* user-speficied range */ if(vdivstep <= 0.0) { /* range specified, but nothing else */ vdivstep = vdivrange; vdivsteps = 2; } else { /* range and step specified, but not number of steps */ vdivsteps = ceil(vdivrange/vdivstep); } } } else { /* user-specified number of steps */ if(vdivrange < 0.0) { /* auto-select range */ if(vdivstep <= 0.0) { /* user cares only about number of steps */ vdivrange = 1.0; vdivstep = vdivrange/vdivsteps; } else { /* user doesn't care about range */ vdivrange = vdivstep; vdivsteps = 2; } } else { /* user-speficied range */ if(vdivstep <= 0.0) { /* range and steps specified */ if(vdivsteps <=1 ) vdivsteps = 2; vdivstep = vdivrange/(vdivsteps-1); } else { /* everything specified */ } } } if(dispsteps <= 0){ /* auto-select number of steps */ if(dispersion < 0.0) { /* auto-select range */ if(dispstep <= 0.0) { /* user doesn't care about anything */ dispsteps = 1; dispersion = 0.0; dispstep = 0.0; } else { /* user specified stepsize and 
nothing else */ dispersion = dispstep; dispsteps = 2; } } else { /* user-speficied range */ if(dispstep <= 0.0) { /* range specified, but nothing else */ dispstep = dispersion; dispsteps = 2; } else { /* range and step specified, but not number of steps */ dispsteps = ceil(dispersion/dispstep); } } } else { /* user-specified number of steps */ if(dispersion < 0.0) { /* auto-select range */ if(dispstep <= 0.0) { /* user cares only about number of steps */ dispersion = 1.0; dispstep = dispersion/dispsteps; } else { /* user doesn't care about range */ dispersion = dispstep; dispsteps = 2; } } else { /* user-speficied range */ if(dispstep <= 0.0) { /* range and steps specified */ if(dispsteps <=1 ) dispsteps = 2; dispstep = dispersion/(dispsteps-1); } else { /* everything specified */ } } } if(detector_thicksteps <= 0){ /* auto-select number of steps */ if(detector_thick < 0.0) { /* auto-select range */ if(detector_thickstep <= 0.0) { /* user doesn't care about anything */ detector_thicksteps = 1; detector_thick = 0.0; detector_thickstep = 0.0; } else { /* user specified stepsize and nothing else */ detector_thick = detector_thickstep; detector_thicksteps = 2; } } else { /* user-speficied range */ if(detector_thickstep <= 0.0) { /* range specified, but nothing else */ detector_thicksteps = 2; detector_thickstep = detector_thick/detector_thicksteps; } else { /* range and step specified, but not number of steps */ detector_thicksteps = ceil(detector_thick/detector_thickstep); } } } else { /* user-specified number of steps */ if(detector_thick < 0.0) { /* auto-select range */ if(detector_thickstep <= 0.0) { /* user cares only about number of steps */ detector_thick = 0.5e-6; detector_thickstep = detector_thick/detector_thicksteps; } else { /* user doesn't care about range */ detector_thick = detector_thickstep; detector_thicksteps = 2; } } else { /* user-speficied range */ if(detector_thickstep <= 0.0) { /* range and steps specified */ if(detector_thicksteps <=1 ) 
detector_thicksteps = 2; detector_thickstep = detector_thick/(detector_thicksteps-1); } else { /* everything specified */ } } } if(mosaic_domains <= 0){ /* auto-select number of domains */ if(mosaic_spread < 0.0) { /* user doesn't care about anything */ mosaic_domains = 1; mosaic_spread = 0.0; } else { /* user-speficied mosaicity, but not number of domains */ if(mosaic_spread == 0.0) { mosaic_domains = 1; } else { printf("WARNING: finite mosaicity with only one domain! upping to 10 mosaic domains\n"); mosaic_domains = 10; } } } else { /* user-specified number of domains */ if(mosaic_spread < 0.0) { /* number of domains specified, but no spread? */ printf("WARNING: no mosaic spread specified. setting mosaic_domains = 1\n"); mosaic_spread = 0.0; mosaic_domains = 1; } else { /* user-speficied mosaicity and number of domains */ if(mosaic_spread == 0.0) { printf("WARNING: zero mosaic spread specified. setting mosaic_domains = 1\n"); mosaic_domains = 1; } } } /* sanity checks */ if(hdivrange <= 0.0 || hdivstep <= 0.0 || hdivsteps <= 0) { hdivsteps = 1; hdivrange = 0.0; hdivstep = 0.0; } if(vdivrange <= 0.0 || vdivstep <= 0.0 || vdivsteps <= 0) { vdivsteps = 1; vdivrange = 0.0; vdivstep = 0.0; } if(dispersion <= 0.0 || dispstep <= 0.0 || dispsteps <= 0) { dispsteps = 1; dispersion = 0.0; dispstep = 0.0; } if(detector_thick <= 0.0 || detector_thickstep <= 0.0 || detector_thicksteps <= 0) { detector_thicksteps = 1; detector_thick = 0.0; detector_thickstep = 0.0; } /* initialize detector origin from a beam center and distance */ /* there are two conventions here: mosflm and XDS */ if(beam_convention == ADXV) printf("adxv"); if(beam_convention == MOSFLM) printf("mosflm"); if(beam_convention == XDS) printf("xds"); if(beam_convention == DIALS) printf("dials"); if(beam_convention == DENZO) printf("denzo"); if(beam_convention == CUSTOM) printf("custom"); printf(" convention selected.\n"); /* first off, what is the relationship between the two "beam centers"? 
*/ rotate(odet_vector,vector,detector_rotx,detector_roty,detector_rotz); ratio = dot_product(beam_vector,vector); if(ratio == 0.0) { ratio = DBL_MIN; } if(isnan(close_distance)) close_distance = fabs(ratio*distance); distance = close_distance/ratio; if(detector_pivot == SAMPLE){ printf("pivoting detector around sample\n"); /* initialize detector origin before rotating detector */ pix0_vector[1] = -Fclose*fdet_vector[1]-Sclose*sdet_vector[1]+close_distance*odet_vector[1]; pix0_vector[2] = -Fclose*fdet_vector[2]-Sclose*sdet_vector[2]+close_distance*odet_vector[2]; pix0_vector[3] = -Fclose*fdet_vector[3]-Sclose*sdet_vector[3]+close_distance*odet_vector[3]; /* now swing the detector origin around */ rotate(pix0_vector,pix0_vector,detector_rotx,detector_roty,detector_rotz); rotate_axis(pix0_vector,pix0_vector,twotheta_axis,detector_twotheta); } /* now orient the detector plane */ rotate(fdet_vector,fdet_vector,detector_rotx,detector_roty,detector_rotz); rotate(sdet_vector,sdet_vector,detector_rotx,detector_roty,detector_rotz); rotate(odet_vector,odet_vector,detector_rotx,detector_roty,detector_rotz); /* also apply orientation part of twotheta swing */ rotate_axis(fdet_vector,fdet_vector,twotheta_axis,detector_twotheta); rotate_axis(sdet_vector,sdet_vector,twotheta_axis,detector_twotheta); rotate_axis(odet_vector,odet_vector,twotheta_axis,detector_twotheta); /* make sure beam center is preserved */ if(detector_pivot == BEAM){ printf("pivoting detector around direct beam spot\n"); pix0_vector[1] = -Fbeam*fdet_vector[1]-Sbeam*sdet_vector[1]+distance*beam_vector[1]; pix0_vector[2] = -Fbeam*fdet_vector[2]-Sbeam*sdet_vector[2]+distance*beam_vector[2]; pix0_vector[3] = -Fbeam*fdet_vector[3]-Sbeam*sdet_vector[3]+distance*beam_vector[3]; } /* what is the point of closest approach between sample and detector? 
*/ Fclose = -dot_product(pix0_vector,fdet_vector); Sclose = -dot_product(pix0_vector,sdet_vector); close_distance = dot_product(pix0_vector,odet_vector); /* where is the direct beam now? */ /* difference between beam impact vector and detector origin */ newvector[1] = close_distance/ratio*beam_vector[1]-pix0_vector[1]; newvector[2] = close_distance/ratio*beam_vector[2]-pix0_vector[2]; newvector[3] = close_distance/ratio*beam_vector[3]-pix0_vector[3]; /* extract components along detector vectors */ Fbeam = dot_product(fdet_vector,newvector); Sbeam = dot_product(sdet_vector,newvector); distance = close_distance/ratio; /* find origin in XDS convention */ ORGX=Fclose/pixel_size+0.5; ORGY=Sclose/pixel_size+0.5; /* find origin in DIALS convention */ newvector[1]=+0;newvector[2]=+0;newvector[3]=+1; dials_origin[1] = 1000.0*dot_product(pix0_vector,newvector); newvector[1]=+0;newvector[2]=+1;newvector[3]=+0; dials_origin[2] = 1000.0*dot_product(pix0_vector,newvector); newvector[1]=-1;newvector[2]=+0;newvector[3]=+0; dials_origin[3] = 1000.0*dot_product(pix0_vector,newvector); /* find the beam in the detector frame */ newvector[1] = dot_product(beam_vector,fdet_vector); newvector[2] = dot_product(beam_vector,sdet_vector); newvector[3] = dot_product(beam_vector,odet_vector); printf("XDS incident beam: %g %g %g\n",newvector[1],newvector[2],newvector[3]); if(interpolate > 1){ /* no user options */ if(( Na <= 2) || (Nb <= 2) || (Nc <= 2)){ printf("auto-selected tricubic interpolation of structure factors\n"); interpolate = 1; } else { printf("auto-selected no interpolation\n"); interpolate = 0; } } /* user-specified unit cell */ if(user_cell) { /* a few random defaults */ if(b[0] <= 0.0) b[0] = a[0]; if(c[0] <= 0.0) c[0] = a[0]; if(alpha <= 0.0) alpha = M_PI/2; if(beta <= 0.0) beta = M_PI/2; if(gamma <= 0.0) gamma = M_PI/2; /* get cell volume from angles */ aavg = (alpha+beta+gamma)/2; skew = sin(aavg)*sin(aavg-alpha)*sin(aavg-beta)*sin(aavg-gamma); if(skew<0.0) skew=-skew; 
V_cell = 2.0*a[0]*b[0]*c[0]*sqrt(skew); if(V_cell <= 0.0) { printf("WARNING: impossible unit cell volume: %g\n",V_cell); V_cell = DBL_MIN; } V_star = 1.0/V_cell; /* now get reciprocal-cell lengths from the angles and volume */ a_star[0] = b[0]*c[0]*sin(alpha)*V_star; b_star[0] = c[0]*a[0]*sin(beta)*V_star; c_star[0] = a[0]*b[0]*sin(gamma)*V_star; if(a_star[0] <= 0.0 || b_star[0] <= 0.0 || c_star[0] <= 0.0) { printf("WARNING: impossible reciprocal cell lengths: %g %g %g\n", a_star[0],b_star[0],c_star[0]); a_star[0] = fabs(a_star[0]); b_star[0] = fabs(b_star[0]); c_star[0] = fabs(c_star[0]); if(a_star[0] <= 0.0) a_star[0] = DBL_MIN; if(b_star[0] <= 0.0) b_star[0] = DBL_MIN; if(c_star[0] <= 0.0) c_star[0] = DBL_MIN; } /* for fun, compute the reciprocal-cell angles from direct-cell angles */ sin_alpha_star = a[0]*V_star/b_star[0]/c_star[0]; sin_beta_star = b[0]*V_star/a_star[0]/c_star[0]; sin_gamma_star = c[0]*V_star/a_star[0]/b_star[0]; cos_alpha_star = (cos(beta)*cos(gamma)-cos(alpha))/(sin(beta)*sin(gamma)); cos_beta_star = (cos(gamma)*cos(alpha)-cos(beta))/(sin(gamma)*sin(alpha)); cos_gamma_star = (cos(alpha)*cos(beta)-cos(gamma))/(sin(alpha)*sin(beta)); if(sin_alpha_star>1.0000001 || sin_alpha_star<-1.0000001 || sin_beta_star >1.0000001 || sin_beta_star <-1.0000001 || sin_gamma_star>1.0000001 || sin_gamma_star<-1.0000001 || cos_alpha_star>1.0000001 || cos_alpha_star<-1.0000001 || cos_beta_star >1.0000001 || cos_beta_star <-1.0000001 || cos_gamma_star>1.0000001 || cos_gamma_star<-1.0000001 ) { printf("WARNING: oddball reciprocal cell angles:\n"); printf("sin(alpha_star) = %.25g\n",sin_alpha_star); printf("cos(alpha_star) = %.25g\n",cos_alpha_star); printf("sin(beta_star) = %.25g\n",sin_beta_star); printf("cos(beta_star) = %.25g\n",cos_beta_star); printf("sin(gamma_star) = %.25g\n",sin_gamma_star); printf("cos9gamma_star) = %.25g\n",cos_gamma_star); } if(sin_alpha_star>1.0) sin_alpha_star=1.0; if(sin_beta_star >1.0) sin_beta_star =1.0; if(sin_gamma_star>1.0) 
sin_gamma_star=1.0; if(sin_alpha_star<-1.0) sin_alpha_star=-1.0; if(sin_beta_star <-1.0) sin_beta_star =-1.0; if(sin_gamma_star<-1.0) sin_gamma_star=-1.0; if(cos_alpha_star*cos_alpha_star>1.0) cos_alpha_star=1.0; if(cos_beta_star *cos_beta_star >1.0) cos_beta_star=1.0; if(cos_gamma_star*cos_gamma_star>1.0) cos_gamma_star=1.0; alpha_star = atan2(sin_alpha_star,cos_alpha_star); beta_star = atan2(sin_beta_star ,cos_beta_star ); gamma_star = atan2(sin_gamma_star,cos_gamma_star); /* construct default orientation */ a_star[1] = a_star[0]; b_star[1] = b_star[0]*cos_gamma_star; c_star[1] = c_star[0]*cos_beta_star; a_star[2] = 0.0; b_star[2] = b_star[0]*sin_gamma_star; c_star[2] = c_star[0]*(cos_alpha_star-cos_beta_star*cos_gamma_star)/sin_gamma_star; a_star[3] = 0.0; b_star[3] = 0.0; c_star[3] = c_star[0]*V_cell/(a[0]*b[0]*c[0]*sin_gamma_star); } /* load the lattice orientation (reciprocal cell vectors) from a mosflm matrix */ if(matfilename != NULL) { infile = fopen(matfilename,"r"); if(infile != NULL) { printf("reading %s\n",matfilename); if(! fscanf(infile,"%lg%lg%lg",a_star+1,b_star+1,c_star+1)) {perror("fscanf");}; if(! fscanf(infile,"%lg%lg%lg",a_star+2,b_star+2,c_star+2)) {perror("fscanf");}; if(! 
fscanf(infile,"%lg%lg%lg",a_star+3,b_star+3,c_star+3)) {perror("fscanf");}; fclose(infile); /* mosflm A matrix includes the wavelength, so remove it */ /* calculate reciprocal cell lengths, store in 0th element */ vector_scale(a_star,a_star,1e-10/lambda0); vector_scale(b_star,b_star,1e-10/lambda0); vector_scale(c_star,c_star,1e-10/lambda0); } } /* check for flag to generate random missetting angle */ if(misset[0] == -1.0) { /* use spherical cap as sphere to generate random orientation in umat */ mosaic_rotation_umat(90.0, umat, &seed); /* get the missetting angles, in case we want to use them again on -misset option */ umat2misset(umat,misset); printf("random orientation misset angles: %f %f %f deg\n",misset[1]*RTD,misset[2]*RTD,misset[3]*RTD); /* apply this orientation shift */ //rotate_umat(a_star,a_star,umat); //rotate_umat(b_star,b_star,umat); //rotate_umat(c_star,c_star,umat); /* do not apply again */ misset[0] = 1.0; } /* apply any missetting angle, if not already done */ if(misset[0] > 0.0) { rotate(a_star,a_star,misset[1],misset[2],misset[3]); rotate(b_star,b_star,misset[1],misset[2],misset[3]); rotate(c_star,c_star,misset[1],misset[2],misset[3]); } /* various cross products */ cross_product(a_star,b_star,a_star_cross_b_star); cross_product(b_star,c_star,b_star_cross_c_star); cross_product(c_star,a_star,c_star_cross_a_star); /* reciprocal lattice vector "a_star" is defined as perpendicular to both b and c, and must also preserve volume converse is true for direct-space lattice: a is perpendicular to both b_star and c_star a = ( b_star cross c_star ) / V_star */ /* reciprocal unit cell volume, but is it lambda-corrected? 
*/ V_star = dot_product(a_star,b_star_cross_c_star); /* make sure any user-supplied cell takes */ if(user_cell) { /* a,b,c and V_cell were generated above */ /* force the cross-product vectors to have proper magnitude: b_star X c_star = a*V_star */ vector_rescale(b_star_cross_c_star,b_star_cross_c_star,a[0]/V_cell); vector_rescale(c_star_cross_a_star,c_star_cross_a_star,b[0]/V_cell); vector_rescale(a_star_cross_b_star,a_star_cross_b_star,c[0]/V_cell); V_star = 1.0/V_cell; } /* direct-space cell volume */ V_cell = 1.0/V_star; /* generate direct-space cell vectors, also updates magnitudes */ vector_scale(b_star_cross_c_star,a,V_cell); vector_scale(c_star_cross_a_star,b,V_cell); vector_scale(a_star_cross_b_star,c,V_cell); /* now that we have direct-space vectors, re-generate the reciprocal ones */ cross_product(a,b,a_cross_b); cross_product(b,c,b_cross_c); cross_product(c,a,c_cross_a); vector_scale(b_cross_c,a_star,V_star); vector_scale(c_cross_a,b_star,V_star); vector_scale(a_cross_b,c_star,V_star); /* for fun, calculate the cell angles too */ sin_alpha = a_star[0]*V_cell/b[0]/c[0]; sin_beta = b_star[0]*V_cell/a[0]/c[0]; sin_gamma = c_star[0]*V_cell/a[0]/b[0]; cos_alpha = dot_product(b,c)/b[0]/c[0]; cos_beta = dot_product(a,c)/a[0]/c[0]; cos_gamma = dot_product(a,b)/a[0]/b[0]; if(sin_alpha>1.0000001 || sin_alpha<-1.0000001 || sin_beta >1.0000001 || sin_beta <-1.0000001 || sin_gamma>1.0000001 || sin_gamma<-1.0000001 || cos_alpha>1.0000001 || cos_alpha<-1.0000001 || cos_beta >1.0000001 || cos_beta <-1.0000001 || cos_gamma>1.0000001 || cos_gamma<-1.0000001 ) { printf("WARNING: oddball cell angles:\n"); printf("sin_alpha = %.25g\n",sin_alpha); printf("cos_alpha = %.25g\n",cos_alpha); printf("sin_beta = %.25g\n",sin_beta); printf("cos_beta = %.25g\n",cos_beta); printf("sin_gamma = %.25g\n",sin_gamma); printf("cos_gamma = %.25g\n",cos_gamma); } if(sin_alpha>1.0) sin_alpha=1.0; if(sin_beta >1.0) sin_beta =1.0; if(sin_gamma>1.0) sin_gamma=1.0; if(sin_alpha<-1.0) 
sin_alpha=-1.0; if(sin_beta <-1.0) sin_beta =-1.0; if(sin_gamma<-1.0) sin_gamma=-1.0; if(cos_alpha*cos_alpha>1.0) cos_alpha=1.0; if(cos_beta *cos_beta >1.0) cos_beta=1.0; if(cos_gamma*cos_gamma>1.0) cos_gamma=1.0; alpha = atan2(sin_alpha,cos_alpha); beta = atan2(sin_beta ,cos_beta ); gamma = atan2(sin_gamma,cos_gamma); /* reciprocal cell angles */ sin_alpha_star = a[0]*V_star/b_star[0]/c_star[0]; sin_beta_star = b[0]*V_star/a_star[0]/c_star[0]; sin_gamma_star = c[0]*V_star/a_star[0]/b_star[0]; cos_alpha_star = dot_product(b_star,c_star)/b_star[0]/c_star[0]; cos_beta_star = dot_product(a_star,c_star)/a_star[0]/c_star[0]; cos_gamma_star = dot_product(a_star,b_star)/a_star[0]/b_star[0]; if(sin_alpha_star>1.0000001 || sin_alpha_star<-1.0000001 || sin_beta_star >1.0000001 || sin_beta_star <-1.0000001 || sin_gamma_star>1.0000001 || sin_gamma_star<-1.0000001 || cos_alpha_star>1.0000001 || cos_alpha_star<-1.0000001 || cos_beta_star >1.0000001 || cos_beta_star <-1.0000001 || cos_gamma_star>1.0000001 || cos_gamma_star<-1.0000001 ) { printf("WARNING: oddball reciprocal cell angles:\n"); printf("sin(alpha_star) = %.25g\n",sin_alpha_star); printf("cos(alpha_star) = %.25g\n",cos_alpha_star); printf("sin(beta_star) = %.25g\n",sin_beta_star); printf("cos(beta_star) = %.25g\n",cos_beta_star); printf("sin(gamma_star) = %.25g\n",sin_gamma_star); printf("cos(gamma_star) = %.25g\n",cos_gamma_star); } if(sin_alpha_star>1.0) sin_alpha_star=1.0; if(sin_beta_star >1.0) sin_beta_star =1.0; if(sin_gamma_star>1.0) sin_gamma_star=1.0; if(sin_alpha_star<-1.0) sin_alpha_star=-1.0; if(sin_beta_star <-1.0) sin_beta_star =-1.0; if(sin_gamma_star<-1.0) sin_gamma_star=-1.0; if(cos_alpha_star*cos_alpha_star>1.0) cos_alpha_star=1.0; if(cos_beta_star *cos_beta_star >1.0) cos_beta_star=1.0; if(cos_gamma_star*cos_gamma_star>1.0) cos_gamma_star=1.0; alpha_star = atan2(sin_alpha_star,cos_alpha_star); beta_star = atan2(sin_beta_star ,cos_beta_star ); gamma_star = atan2(sin_gamma_star,cos_gamma_star); 
printf("Unit Cell: %g %g %g %g %g %g\n", a[0],b[0],c[0],alpha*RTD,beta*RTD,gamma*RTD); printf("Recp Cell: %g %g %g %g %g %g\n", a_star[0],b_star[0],c_star[0],alpha_star*RTD,beta_star*RTD,gamma_star*RTD); printf("volume = %g A^3\n",V_cell); /* print out the real-space matrix */ printf("real-space cell vectors (Angstrom):\n"); printf(" %-10s %-10s %-10s\n","a","b","c"); printf("X: %11.8f %11.8f %11.8f\n",a[1],b[1],c[1]); printf("Y: %11.8f %11.8f %11.8f\n",a[2],b[2],c[2]); printf("Z: %11.8f %11.8f %11.8f\n",a[3],b[3],c[3]); printf("reciprocal-space cell vectors (Angstrom^-1):\n"); printf(" %-10s %-10s %-10s\n","a_star","b_star","c_star"); printf("X: %11.8f %11.8f %11.8f\n",a_star[1],b_star[1],c_star[1]); printf("Y: %11.8f %11.8f %11.8f\n",a_star[2],b_star[2],c_star[2]); printf("Z: %11.8f %11.8f %11.8f\n",a_star[3],b_star[3],c_star[3]); /* now convert these to meters */ vector_scale(a,a,1e-10); vector_scale(b,b,1e-10); vector_scale(c,c,1e-10); /* define phi=0 mosaic=0 crystal orientation */ vector_scale(a,a0,1.0); vector_scale(b,b0,1.0); vector_scale(c,c0,1.0); /* define phi=0 crystal orientation */ vector_scale(a,ap,1.0); vector_scale(b,bp,1.0); vector_scale(c,cp,1.0); /* now we know the cell, calculate crystal size in meters */ if(sample_x > 0) Na = ceil(sample_x/a[0]); if(sample_y > 0) Nb = ceil(sample_y/b[0]); if(sample_z > 0) Nc = ceil(sample_z/c[0]); if(Na <= 1.0) Na = 1.0; if(Nb <= 1.0) Nb = 1.0; if(Nc <= 1.0) Nc = 1.0; xtalsize_a = a[0]*Na; xtalsize_b = b[0]*Nb; xtalsize_c = c[0]*Nc; printf("crystal is %g x %g x %g microns\n",xtalsize_a*1e6,xtalsize_b*1e6,xtalsize_c*1e6); xtalsize_max = xtalsize_a; if(xtalsize_max < xtalsize_b) xtalsize_max = xtalsize_b; if(xtalsize_max < xtalsize_c) xtalsize_max = xtalsize_c; reciprocal_pixel_size = lambda0*distance/pixel_size; recommended_oversample = ceil(3.0 * xtalsize_max/reciprocal_pixel_size); if(recommended_oversample <= 0) recommended_oversample = 1; if(oversample <= 0) { oversample = recommended_oversample; 
printf("auto-selected %d-fold oversampling\n",oversample); } if(oversample < recommended_oversample) { printf("WARNING: maximum dimension of sample is %g A\n",xtalsize_max*1e10); printf(" but reciprocal pixel size is %g A\n", reciprocal_pixel_size*1e10 ); printf(" intensity may vary significantly across a pixel!\n"); printf(" recommend -oversample %d to work around this\n",recommended_oversample); } /* rough estimate of sample properties */ sample_x = xtalsize_a; sample_y = xtalsize_b; sample_z = xtalsize_c; volume = sample_x*sample_y*sample_z; density = 1.2e6; molecules = Na*Nb*Nc; molecular_weight = volume*density*Avogadro/molecules; printf("approximate MW = %g\n",molecular_weight); /* load the structure factors */ if(hklfilename == NULL) { /* try to recover Fs from a previous run */ if(Fdumpfile != NULL) { printf("reading Fs from %s\n",dumpfilename); // n=0; if(! fscanf(Fdumpfile,"%d%d%d%d%d%d\n\f",&h_min,&h_max,&k_min,&k_max,&l_min,&l_max) ) {perror("fscanf");}; h_range = h_max - h_min + 1; k_range = k_max - k_min + 1; l_range = l_max - l_min + 1; Fhkl = (double***) calloc(h_range+1,sizeof(double**)); for (h0=0; h0<=h_range;h0++) { *(Fhkl +h0) = (double**) calloc(k_range+1,sizeof(double*)); for (k0=0; k0<=k_range;k0++) { *(*(Fhkl +h0)+k0) = (double*) calloc(l_range+1,sizeof(double)); if(! 
fread(*(*(Fhkl +h0)+k0),sizeof(double),l_range+1,Fdumpfile) ) { perror("fscanf"); }; // n+=l_range; } } fclose(Fdumpfile); hkls = h_range*k_range*l_range; } else { /* no hkl file and no dumpfile */ if(default_F == 0.0) { printf("ERROR: no hkl file and no dump file to read."); exit(9); } } } else { infile = fopen(hklfilename,"r"); if(infile == NULL) { printf("ERROR: unable to open %s.",hklfilename); exit(9); } hkls = 0; h_min=k_min=l_min=1e9; h_max=k_max=l_max=-1e9; printf("counting entries in %s\n",hklfilename); while(4 == fscanf(infile,"%lg%lg%lg%lg",&h,&k,&l,&F_cell)){ if(h != ceil(h-0.4)) printf("WARNING: non-integer value for h (%g) at line %d\n",h,hkls); if(k != ceil(k-0.4)) printf("WARNING: non-integer value for k (%g) at line %d\n",k,hkls); if(l != ceil(l-0.4)) printf("WARNING: non-integer value for l (%g) at line %d\n",l,hkls); if(h_min > h) h_min = h; if(k_min > k) k_min = k; if(l_min > l) l_min = l; if(h_max < h) h_max = h; if(k_max < k) k_max = k; if(l_max < l) l_max = l; ++hkls; } rewind(infile); h_range = h_max - h_min + 1; k_range = k_max - k_min + 1; l_range = l_max - l_min + 1; if(h_range < 0 || k_range < 0 || l_range < 0) { printf("h: %d - %d\n",h_min,h_max); printf("k: %d - %d\n",k_min,k_max); printf("l: %d - %d\n",l_min,l_max); printf("ERROR: not enough HKL indices in %s\n",hklfilename); exit(9); } /* allocate memory for 3d arrays */ //printf("allocating %d %d-byte double**\n",h_range+1,sizeof(double**)); Fhkl = (double***) calloc(h_range+1,sizeof(double**)); if(Fhkl==NULL){perror("ERROR");exit(9);}; for (h0=0; h0<=h_range;h0++) { //printf("allocating %d %d-byte double*\n",k_range+1,sizeof(double*)); Fhkl[h0] = (double**) calloc(k_range+1,sizeof(double*)); if(Fhkl[h0]==NULL){perror("ERROR");exit(9);}; for (k0=0; k0<=k_range;k0++) { //printf("allocating %d %d-byte double\n",k_range+1,sizeof(double)); Fhkl[h0][k0] = (double*) calloc(l_range+1,sizeof(double)); if(Fhkl[h0][k0]==NULL){perror("ERROR");exit(9);}; } } if(default_F != 0.0) { 
printf("initializing to default_F = %g:\n",default_F); for (h0=0; h0<h_range;h0++) { for (k0=0; k0<k_range;k0++) { for (l0=0; l0<l_range;l0++) { Fhkl[h0][k0][l0] = default_F; } } } printf("done initializing:\n"); } printf("re-reading %s\n",hklfilename); while(4 == fscanf(infile,"%d%d%d%lg",&h0,&k0,&l0,&F_cell)){ Fhkl[h0-h_min][k0-k_min][l0-l_min]=F_cell; } fclose(infile); // for(h0=h_min;h0<=h_max;++h0){ // for(k0=k_min;k0<=k_max;++k0){ // for(l0=l_min;l0<=l_max;++l0){ // if ( (h0<=h_max) && (h0>=h_min) && (k0<=k_max) && (k0>=k_min) && (l0<=l_max) && (l0>=l_min) ) { // /* just take nearest-neighbor */ // F_cell = Fhkl[h0-h_min][k0-k_min][l0-l_min]; // } // else // { // F_cell = 0.0; // } // printf("%d %d %d = %f\n",h0,k0,l0,F_cell); // } // } // } /* make dump file */ outfile = fopen(dumpfilename,"wb"); if(outfile == NULL) { printf("WARNING: unable to open dump file: %s\n",dumpfilename); } else { printf("writing dump file for next time: %s\n",dumpfilename); fprintf(outfile,"%d %d %d %d %d %d\n\f",h_min,h_max,k_min,k_max,l_min,l_max); for (h0=0; h0<=h_range;h0++) { for (k0=0; k0<=k_range;k0++) { fwrite(*(*(Fhkl +h0)+k0),sizeof(double),l_range+1,outfile); } } fclose(outfile); } } /* no point in interpolating if nothing to interpolate */ if(hkls == 0) interpolate = 0; if(interpolate){ /* allocate interpolation array */ sub_Fhkl = (double***) calloc(6,sizeof(double**)); for (h0=0; h0<=5;h0++) { *(sub_Fhkl +h0) = (double**) calloc(6,sizeof(double*)); for (k0=0; k0<=5;k0++) { *(*(sub_Fhkl +h0)+k0) = (double*) calloc(6,sizeof(double)); } } } /* now read in amorphous material structure factors */ stols = 0; if(stolfilename != NULL) { printf("reading %s\n",stolfilename); stols = read_text_file(stolfilename,2,&stol_of,&F_of); if(stols == 0){ perror("no data in input file"); exit(9); } } if(stols == 0 && water_size != 0.0) { /* do something clever here */ } if(stols > 0) { /* add two values at either end for interpolation */ stols += 4; F_highangle = NAN; 
for(i=stols-3;i>1;--i){ stol_of[i] = stol_of[i-2] * stol_file_mult; F_of[i] = F_of[i-2]; if(! isnan(F_of[i])) { F_lowangle = F_of[i]; if(isnan(F_highangle)) { F_highangle = F_of[i]; } } else { /* missing values are zero */ F_of[i] = 0.0; } } stol_of[0] = -1e99; stol_of[1] = -1e98; F_of[0] = F_of[1] = F_lowangle; stol_of[stols-2] = 1e98; stol_of[stols-1] = 1e99; F_of[stols-1] = F_of[stols-2] = F_highangle; } /* print out detector sensor thickness with sweep over all sensor layers */ for(thick_tic=0;thick_tic<detector_thicksteps;++thick_tic){ printf("thick%d = %g um\n",thick_tic,detector_thickstep*thick_tic*1e6); } /* show phi steps with sweep over spindle axis */ for(phi_tic = 0; phi_tic < phisteps; ++phi_tic){ phi = phi0 + phistep*phi_tic; printf("phi%d = %g\n",phi_tic,phi*RTD); } /* import sources from user file */ sources = 0; if(sourcefilename != NULL) { sources = read_text_file(sourcefilename,5,&source_X,&source_Y,&source_Z,&source_I,&source_lambda); if(sources == 0) { perror("reading source definition file"); exit(9); } /* apply defaults to missing values */ for(source=0;source<sources;++source){ if(isnan(source_X[source])) { source_X[source] = -source_distance*beam_vector[1]; } if(isnan(source_Y[source])) { source_Y[source] = -source_distance*beam_vector[2]; } if(isnan(source_Z[source])) { source_Z[source] = -source_distance*beam_vector[3]; } if(isnan(source_I[source])) { source_I[source] = 1.0; } if(isnan(source_lambda[source])) { source_lambda[source] = lambda0; } } } if(sources == 0) { /* generate generic list of sources */ /* count divsteps sweep over solid angle of beam divergence */ divsteps = 0; for(hdiv_tic=0;hdiv_tic<hdivsteps;++hdiv_tic){ for(vdiv_tic=0;vdiv_tic<vdivsteps;++vdiv_tic){ hdiv = hdivstep * hdiv_tic - hdivrange/2.0 ; vdiv = vdivstep * vdiv_tic - vdivrange/2.0 ; /* force an elliptical divergence */ test = (hdiv*hdiv-hdivstep*hdivstep/4.0*(1-hdivsteps%2))/hdivrange/hdivrange ; test += 
(vdiv*vdiv-vdivstep*vdivstep/4.0*(1-vdivsteps%2))/vdivrange/vdivrange ; if( round_div && test*4.0 > 1.1) continue; ++divsteps; printf("divergence deviation: %g %g\n",hdiv,vdiv); } } /* print out wavelength steps with sweep over spectral dispersion */ for(disp_tic=0;disp_tic<dispsteps;++disp_tic){ lambda = lambda0 * ( 1.0 + dispstep * disp_tic - dispersion/2.0 ) ; printf("lambda%d = %.15g\n",disp_tic,lambda); } /* allocate enough space */ sources = divsteps*dispsteps; source_X = (double *) calloc(sources+10,sizeof(double)); source_Y = (double *) calloc(sources+10,sizeof(double)); source_Z = (double *) calloc(sources+10,sizeof(double)); source_I = (double *) calloc(sources+10,sizeof(double)); source_lambda = (double *) calloc(sources+10,sizeof(double)); /* now actually create the source entries */ weight = 1.0/sources; sources = 0; for(hdiv_tic=0;hdiv_tic<hdivsteps;++hdiv_tic){ for(vdiv_tic=0;vdiv_tic<vdivsteps;++vdiv_tic){ hdiv = hdivstep * hdiv_tic - hdivrange/2.0 ; vdiv = vdivstep * vdiv_tic - vdivrange/2.0 ; /* force an elliptical divergence */ test = (hdiv*hdiv-hdivstep*hdivstep/4.0*(1-hdivsteps%2))/hdivrange/hdivrange ; test += (vdiv*vdiv-vdivstep*vdivstep/4.0*(1-vdivsteps%2))/vdivrange/vdivrange ; if( round_div && test*4.0 > 1.1) continue; /* construct unit vector along "beam" */ vector[1] = -source_distance*beam_vector[1]; vector[2] = -source_distance*beam_vector[2]; vector[3] = -source_distance*beam_vector[3]; /* divergence is in angle space */ /* define "horizontal" as the E-vector of the incident beam */ rotate_axis(vector,newvector,polar_vector,vdiv); rotate_axis(newvector,vector,vert_vector,hdiv); /* one source at each position for each wavelength */ for(disp_tic=0;disp_tic<dispsteps;++disp_tic){ lambda = lambda0 * ( 1.0 + dispstep * disp_tic - dispersion/2.0 ) ; source_X[sources] = vector[1]; source_Y[sources] = vector[2]; source_Z[sources] = vector[3]; source_I[sources] = weight; source_lambda[sources] = lambda; ++sources; } } } } printf(" created a 
total of %d sources:\n",sources); for(source=0;source<sources;++source){ /* retrieve stuff from cache */ X = vector[1] = source_X[source]; Y = vector[2] = source_Y[source]; Z = vector[3] = source_Z[source]; I = source_I[source]; lambda = source_lambda[source]; /* make sure these are unit vectors */ unitize(vector,vector); source_X[source] = vector[1]; source_Y[source] = vector[2]; source_Z[source] = vector[3]; printf("%g %g %g %g %g\n",X,Y,Z,I,lambda); } /* allocate enough space */ mosaic_umats = (double *) calloc(mosaic_domains+10,9*sizeof(double)); /* now actually create the orientation of each domain */ for(mos_tic=0;mos_tic<mosaic_domains;++mos_tic){ mosaic_rotation_umat(mosaic_spread, mosaic_umats+9*mos_tic, &mosaic_seed); if(mos_tic==0) { /* force at least one domain to be "aligned"? */ mosaic_umats[0]=1.0;mosaic_umats[1]=0.0;mosaic_umats[2]=0.0; mosaic_umats[3]=0.0;mosaic_umats[4]=1.0;mosaic_umats[5]=0.0; mosaic_umats[6]=0.0;mosaic_umats[7]=0.0;mosaic_umats[8]=1.0; } // printf("%d diagonal %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+8]); printf("%d by: %f deg\n",mos_tic,acos((mosaic_umats[mos_tic*9]+mosaic_umats[mos_tic*9+4]+mosaic_umats[mos_tic*9+8]-1)/2)*RTD); // umat2misset(mosaic_umats+9*mos_tic,mosaic_missets); // printf("%d by: %f %f %f deg\n",mos_tic,mosaic_missets[1]*RTD,mosaic_missets[2]*RTD,mosaic_missets[3]*RTD); // printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+0),*(mosaic_umats+9*mos_tic+1),*(mosaic_umats+9*mos_tic+2)); // printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+3),*(mosaic_umats+9*mos_tic+4),*(mosaic_umats+9*mos_tic+5)); // printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+6),*(mosaic_umats+9*mos_tic+7),*(mosaic_umats+9*mos_tic+8)); } printf(" created a total of %d mosaic domains\n",mosaic_domains); /* final decisions about sampling */ if(oversample <= 0) oversample = 1; steps = sources*mosaic_domains*phisteps*oversample*oversample; subpixel_size = pixel_size/oversample; 
printf(" %d initialized hkls (all others =%g)\n",hkls,default_F); printf(" "); if(xtal_shape == ROUND) printf("ellipsoidal"); if(xtal_shape == SQUARE) printf("parallelpiped"); if(xtal_shape == GAUSS ) printf("gaussian"); if(xtal_shape == TOPHAT) printf("tophat-spot"); printf(" xtal: %.0fx%.0fx%.0f cells\n",Na,Nb,Nc); printf(" wave=%g meters +/- %g%% in %d steps\n",lambda0,dispersion*100,dispsteps); if(nopolar) { printf(" polarization effect disabled\n"); } else { printf(" Kahn polarization factor: %f\n",polarization); } if(curved_detector) printf(" curved detector: all pixels same distance from origin\n"); if(point_pixel) printf(" pixel obliquity effect disabled\n"); printf(" incident fluence: %lg photons/m^2\n",fluence); printf(" distance=%lg detsize=%lgx%lg pixel=%lg meters (%dx%d pixels)\n",distance,detsize_f,detsize_s,pixel_size,fpixels,spixels); printf(" Xbeam=%lg Ybeam=%lg\n",Xbeam,Ybeam); printf(" Fbeam=%lg Sbeam=%lg\n",Fbeam,Sbeam); printf(" Xclose=%lg Yclose=%lg\n",Xclose,Yclose); printf(" Fclose=%lg Sclose=%lg\n",Fclose,Sclose); printf(" DIRECTION_OF_DETECTOR_X-AXIS= %g %g %g\n",fdet_vector[1],fdet_vector[2],fdet_vector[3]); printf(" DIRECTION_OF_DETECTOR_Y-AXIS= %g %g %g\n",sdet_vector[1],sdet_vector[2],sdet_vector[3]); printf(" DIRECTION_OF_DETECTOR_Z-AXIS= %g %g %g\n",odet_vector[1],odet_vector[2],odet_vector[3]); printf(" INCIDENT_BEAM_DIRECTION= %g %g %g\n",beam_vector[1],beam_vector[2],beam_vector[3]); printf(" spindle ROTATION_AXIS= %g %g %g\n",spindle_vector[1],spindle_vector[2],spindle_vector[3]); cross_product(beam_vector,polar_vector,vector); printf(" POLARIZATION_PLANE_NORMAL= %g %g %g\n",vector[1],vector[2],vector[3]); printf(" dials origin= %g %g %g\n",dials_origin[1],dials_origin[2],dials_origin[3]); printf(" roi: %d < x < %d && %d < y < %d\n",roi_xmin,roi_xmax,roi_ymin,roi_ymax); printf(" hdivrange=%g hdivstep=%g radians\n",hdivrange,hdivstep); printf(" vdivrange=%g vdivstep=%g radians\n",vdivrange,vdivstep); printf(" %d divergence 
steps\n",divsteps); printf(" %d sources\n",sources); printf(" %d mosaic domains over mosaic spread of %g degrees\n",mosaic_domains,mosaic_spread*RTD); printf(" %d phi steps from %g to %g degrees\n",phisteps,phi0*RTD,(phi0+osc)*RTD); printf(" %dx%d pixel oversample steps",oversample,oversample); if(oversample_thick) printf(" +thick"); if(oversample_polar) printf(" +polar"); if(oversample_omega) printf(" +omega"); printf("\n"); if(maskimage != NULL) printf(" skipping zero-flagged pixels in %s\n",maskfilename); // printf(" coherent source: %d\n",coherent); if(calculate_noise){ printf("\n noise image paramters:\n"); printf(" seed: %ld\n",seed); printf(" water droplet size: %g m\n",water_size); } /* pre-calculaate background from something amorphous */ F_bg = water_F; I_bg = F_bg*F_bg*r_e_sqr*fluence*water_size*water_size*water_size*1e6*Avogadro/water_MW; /* sweep over detector */ sum = sumsqr = 0.0; sumn = 0; progress_pixel = 0; omega_sum = 0.0; #if defined(_OPENMP) // omp_set_num_threads(72); #endif int debug_printed_thread = 0; int debug_printed = 0; #pragma omp parallel for \ schedule(auto) \ private(fpixel,spixel)\ firstprivate(imgidx,subS,subF,Fdet,Sdet,Fdet0,Sdet0,Odet,stol,twotheta,\ theta,vector,newvector,pixel_pos,\ airpath,source_path,lambda,\ diffracted,diffracted0,d_r,incident,scattering,parallax,\ fdet_vector,sdet_vector,odet_vector,beam_vector,pix0_vector,polar_vector,spindle_vector,\ hdiv_tic,vdiv_tic,disp_tic,mos_tic,phi_tic,thick_tic,source,\ phi,\ phi0,osc,phistep,phisteps,\ a,b,c,ap,bp,cp,a_star,b_star,c_star,a_cross_b,b_cross_c,c_cross_a,\ h,k,l,h0,k0,l0,h0_flr,k0_flr,l0_flr,\ h_interp,k_interp,l_interp,h_interp_d,k_interp_d,l_interp_d,hrad_sqr,\ i1,i2,i3,\ Ewald0,Ewald,relp,\ xd,yd,zd,xd0,yd0,zd0,\ capture_fraction,\ I,I_bg,F_bg,\ F_cell,F_latt,polar,omega_pixel,\ test,i,sub_Fhkl,\ Fhkl,\ debug_printed_thread)\ shared(debug_printed,\ floatimage,maskimage,\ fpixels,spixels,pixels,pixel_size,subpixel_size,\ 
oversample,oversample_thick,oversample_polar,oversample_omega,\ Xbeam,Ybeam,\ interpolate,integral_form,curved_detector,\ polarization,nopolar,\ point_pixel,coherent,babble,\ distance,close_distance,\ source_X,source_Y,source_Z,source_lambda,\ sources,\ progress_meter,progress_pixels,\ a0,b0,c0,V_cell,\ Na,Nb,Nc,\ h_min,h_max,h_range,k_min,k_max,k_range,l_min,l_max,l_range,hkls,\ dmin,\ xtal_shape,fudge,\ fluence,r_e_sqr,\ lambda0,dispersion,dispstep,dispsteps,\ source_distance,\ default_F,water_F,water_size,water_MW,\ steps,\ hdiv,hdivrange,hdivstep,hdivsteps,vdiv,vdivrange,vdivstep,vdivsteps,round_div,\ mosaic_spread,mosaic_umats,mosaic_domains,\ detector_thick,detector_thickstep,detector_thicksteps,detector_mu,\ roi_xmin,roi_xmax,roi_ymin,roi_ymax,\ max_I,max_I_x,max_I_y,\ printout,printout_fpixel,printout_spixel,stdout)\ reduction(+:sum,sumsqr,sumn,omega_sum,progress_pixel)\ default(none) for(spixel=0;spixel<spixels;++spixel) { #if defined(_OPENMP) //if(! debug_printed) { // debug_printed = 1; // printf("OMP: %d of %d threads\n", omp_get_thread_num(),omp_get_num_threads()); //} if(! 
debug_printed_thread) { /* avoid memory contention: make a copy of each dynamically-allocated array for each thread * double *newptr; double **newpptr; double ***newFhkl; newptr = (double *) calloc((h_range+1)*(k_range+1)*(l_range+1),sizeof(double)); newpptr = (double **) calloc((h_range+1)*(k_range+1),sizeof(double *)); newFhkl = (double ***) calloc((h_range+1),sizeof(double **)); for (h0=0; h0<=h_range;h0++) { newFhkl[h0] = newpptr; for (k0=0; k0<=k_range;k0++) { newFhkl[h0][k0] = newptr; memcpy(newptr,*(*(Fhkl +h0)+k0),(l_range+1)*sizeof(double)); newptr += l_range+1; } ++newpptr; } Fhkl = newFhkl; /* */ // newptr = (double *) calloc(sources+10,sizeof(double)); // memcpy(newptr,source_X,sources*sizeof(double)); // source_X = newptr; // newptr = (double *) calloc(sources+10,sizeof(double)); // memcpy(newptr,source_Y,sources*sizeof(double)); // source_Y = newptr; // newptr = (double *) calloc(sources+10,sizeof(double)); // memcpy(newptr,source_Z,sources*sizeof(double)); // source_Z = newptr; // newptr = (double *) calloc(sources+10,sizeof(double)); // memcpy(newptr,source_lambda,sources*sizeof(double)); // source_lambda = newptr; // newptr = (double *) calloc(mosaic_domains+10,9*sizeof(double)); // memcpy(newptr,mosaic_umats,9*mosaic_domains*sizeof(double)); // printf("thread: %d mosaic_umats = %p\n", omp_get_thread_num(),mosaic_umats); // mosaic_umats = newptr; printf("thread: %d mosaic_umats = %p\n", omp_get_thread_num(),mosaic_umats); debug_printed_thread = 1; } #endif for(fpixel=0;fpixel<fpixels;++fpixel) { /* allow for just one part of detector to be rendered */ if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { continue; } /* position in pixel array */ imgidx = spixel*fpixels+fpixel; /* allow for the use of a mask */ if(maskimage != NULL) { /* skip any flagged pixels in the mask */ if(maskimage[imgidx] == 0) { continue; } } /* reset uncorrected photon count for this pixel */ I = I_bg; /* reset polarization factor, in case 
we want to cache it */ polar = 0.0; if (nopolar) polar = 1.0; /* reset pixel solid angle, in case we want to cache it */ omega_pixel = 0.0; /* add this now to avoid problems with skipping later? */ // floatimage[imgidx] = I_bg; /* loop over detector layers */ for(thick_tic=0;thick_tic<detector_thicksteps;++thick_tic) { /* assume "distance" is to the front of the detector sensor layer */ Odet = thick_tic*detector_thickstep; /* reset capture fraction, in case we want to cache it */ capture_fraction = 0.0; /* or if we are not modelling detector thickness */ if(detector_thick == 0.0) capture_fraction = 1.0; /* loop over sub-pixels */ for(subS=0;subS<oversample;++subS) { for(subF=0;subF<oversample;++subF) { /* absolute mm position on detector (relative to its origin) */ Fdet = subpixel_size*(fpixel*oversample + subF ) + subpixel_size/2.0; Sdet = subpixel_size*(spixel*oversample + subS ) + subpixel_size/2.0; // Fdet = pixel_size*fpixel; // Sdet = pixel_size*spixel; /* construct detector subpixel position in 3D space */ // pixel_X = distance; // pixel_Y = Sdet-Ybeam; // pixel_Z = Fdet-Xbeam; pixel_pos[1] = Fdet*fdet_vector[1]+Sdet*sdet_vector[1]+Odet*odet_vector[1]+pix0_vector[1]; pixel_pos[2] = Fdet*fdet_vector[2]+Sdet*sdet_vector[2]+Odet*odet_vector[2]+pix0_vector[2]; pixel_pos[3] = Fdet*fdet_vector[3]+Sdet*sdet_vector[3]+Odet*odet_vector[3]+pix0_vector[3]; pixel_pos[0] = 0.0; if(curved_detector) { /* construct detector pixel that is always "distance" from the sample */ vector[1] = distance*beam_vector[1]; vector[2] = distance*beam_vector[2] ; vector[3] = distance*beam_vector[3]; /* treat detector pixel coordinates as radians */ rotate_axis(vector,newvector,sdet_vector,pixel_pos[2]/distance); rotate_axis(newvector,pixel_pos,fdet_vector,pixel_pos[3]/distance); // rotate(vector,pixel_pos,0,pixel_pos[3]/distance,pixel_pos[2]/distance); } /* construct the diffracted-beam unit vector to this sub-pixel */ airpath = unitize(pixel_pos,diffracted); /* solid angle subtended by a 
pixel: (pix/airpath)^2*cos(2theta) */ if(omega_pixel == 0.0 || oversample_omega) { /* this is either the first time for this pixel, or we are oversampling omega */ omega_pixel = pixel_size*pixel_size/airpath/airpath*close_distance/airpath; /* option to turn off obliquity effect, inverse-square-law only */ if(point_pixel) omega_pixel = 1.0/airpath/airpath; } /* keep track for final statistics */ omega_sum += omega_pixel; /* now calculate detector thickness effects */ if(capture_fraction == 0.0 || oversample_thick) { /* inverse of effective thickness increase */ parallax = dot_product(diffracted,odet_vector); /* fraction of incoming photons absorbed by this detector layer */ capture_fraction = exp(-thick_tic*detector_thickstep*detector_mu/parallax) -exp(-(thick_tic+1)*detector_thickstep*detector_mu/parallax); } /* loop over sources now */ for(source=0;source<sources;++source){ /* retrieve stuff from cache */ incident[1] = -source_X[source]; incident[2] = -source_Y[source]; incident[3] = -source_Z[source]; lambda = source_lambda[source]; /* construct the incident beam unit vector while recovering source distance */ /* source arrays should already be unit vectors */ // source_path = unitize(incident,incident); /* construct the scattering vector for this pixel */ scattering[1] = (diffracted[1]-incident[1])/lambda; scattering[2] = (diffracted[2]-incident[2])/lambda; scattering[3] = (diffracted[3]-incident[3])/lambda; /* sin(theta)/lambda is half the scattering vector length */ stol = 0.5*magnitude(scattering); /* rough cut to speed things up when we aren't using whole detector */ if(dmin > 0.0 && stol > 0.0) { if(dmin > 0.5/stol) { continue; } } /* we now have enough to fix the polarization factor */ if (polar == 0.0 || oversample_polar) { /* need to compute polarization factor */ polar = polarization_factor(polarization,incident,diffracted,polar_vector); } /* sweep over phi angles */ for(phi_tic = 0; phi_tic < phisteps; ++phi_tic) { phi = phi0 + phistep*phi_tic; if( phi 
!= 0.0 ) { /* rotate about spindle if neccesary */ rotate_axis(a0,ap,spindle_vector,phi); rotate_axis(b0,bp,spindle_vector,phi); rotate_axis(c0,cp,spindle_vector,phi); } /* enumerate mosaic domains */ for(mos_tic=0;mos_tic<mosaic_domains;++mos_tic) { /* apply mosaic rotation after phi rotation */ if( mosaic_spread > 0.0 ) { rotate_umat(ap,a,&mosaic_umats[mos_tic*9]); rotate_umat(bp,b,&mosaic_umats[mos_tic*9]); rotate_umat(cp,c,&mosaic_umats[mos_tic*9]); } else { a[1]=ap[1];a[2]=ap[2];a[3]=ap[3]; b[1]=bp[1];b[2]=bp[2];b[3]=bp[3]; c[1]=cp[1];c[2]=cp[2];c[3]=cp[3]; } // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+0],mosaic_umats[mos_tic*9+1],mosaic_umats[mos_tic*9+2]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+3],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+5]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+6],mosaic_umats[mos_tic*9+7],mosaic_umats[mos_tic*9+8]); /* construct fractional Miller indicies */ h = dot_product(a,scattering); k = dot_product(b,scattering); l = dot_product(c,scattering); /* round off to nearest whole index */ h0 = ceil(h-0.5); k0 = ceil(k-0.5); l0 = ceil(l-0.5); /* structure factor of the lattice (paralelpiped crystal) F_latt = sin(M_PI*Na*h)*sin(M_PI*Nb*k)*sin(M_PI*Nc*l)/sin(M_PI*h)/sin(M_PI*k)/sin(M_PI*l); */ F_latt = 1.0; if(xtal_shape == SQUARE) { /* xtal is a paralelpiped */ if(Na>1){ F_latt *= sincg(M_PI*h,Na); } if(Nb>1){ F_latt *= sincg(M_PI*k,Nb); } if(Nc>1){ F_latt *= sincg(M_PI*l,Nc); } } else { /* handy radius in reciprocal space, squared */ hrad_sqr = (h-h0)*(h-h0)*Na*Na + (k-k0)*(k-k0)*Nb*Nb + (l-l0)*(l-l0)*Nc*Nc ; } if(xtal_shape == ROUND) { /* use sinc3 for elliptical xtal shape, correcting for sqrt of volume ratio between cube and sphere */ F_latt = Na*Nb*Nc*0.723601254558268*sinc3(M_PI*sqrt( hrad_sqr * fudge ) ); } if(xtal_shape == GAUSS) { /* fudge the radius so that volume and FWHM are similar to square_xtal spots */ F_latt = Na*Nb*Nc*exp(-( hrad_sqr / 0.63 * fudge )); } 
if(xtal_shape == TOPHAT) { /* make a flat-top spot of same height and volume as square_xtal spots */ F_latt = Na*Nb*Nc*(hrad_sqr*fudge < 0.3969 ); } /* no need to go further if result will be zero? */ if(F_latt == 0.0 && water_size == 0.0) continue; /* find nearest point on Ewald sphere surface? */ if( integral_form ) { if( phi != 0.0 || mos_tic > 0 ) { /* need to re-calculate reciprocal matrix */ /* various cross products */ cross_product(a,b,a_cross_b); cross_product(b,c,b_cross_c); cross_product(c,a,c_cross_a); /* new reciprocal-space cell vectors */ vector_scale(b_cross_c,a_star,1e20/V_cell); vector_scale(c_cross_a,b_star,1e20/V_cell); vector_scale(a_cross_b,c_star,1e20/V_cell); } /* reciprocal-space coordinates of nearest relp */ relp[1] = h0*a_star[1] + k0*b_star[1] + l0*c_star[1]; relp[2] = h0*a_star[2] + k0*b_star[2] + l0*c_star[2]; relp[3] = h0*a_star[3] + k0*b_star[3] + l0*c_star[3]; // d_star = magnitude(relp) /* reciprocal-space coordinates of center of Ewald sphere */ Ewald0[1] = -incident[1]/lambda/1e10; Ewald0[2] = -incident[2]/lambda/1e10; Ewald0[3] = -incident[3]/lambda/1e10; // 1/lambda = magnitude(Ewald0) /* distance from Ewald sphere in lambda=1 units */ vector[1] = relp[1]-Ewald0[1]; vector[2] = relp[2]-Ewald0[2]; vector[3] = relp[3]-Ewald0[3]; d_r = magnitude(vector)-1.0; /* unit vector of diffracted ray through relp */ unitize(vector,diffracted0); /* intersection with detector plane */ xd = dot_product(fdet_vector,diffracted0); yd = dot_product(sdet_vector,diffracted0); zd = dot_product(odet_vector,diffracted0); /* where does the central direct-beam hit */ xd0 = dot_product(fdet_vector,incident); yd0 = dot_product(sdet_vector,incident); zd0 = dot_product(odet_vector,incident); /* convert to mm coordinates */ Fdet0 = distance*(xd/zd) + Xbeam; Sdet0 = distance*(yd/zd) + Ybeam; //printf("GOTHERE %g %g %g %g\n",Fdet,Sdet,Fdet0,Sdet0); test = exp(-( (Fdet-Fdet0)*(Fdet-Fdet0)+(Sdet-Sdet0)*(Sdet-Sdet0) + d_r*d_r )/1e-8); } // end of integral form /* 
structure factor of the unit cell */ if(interpolate){ h0_flr = floor(h); k0_flr = floor(k); l0_flr = floor(l); if ( ((h-h_min+3)>h_range) || (h-2<h_min) || ((k-k_min+3)>k_range) || (k-2<k_min) || ((l-l_min+3)>l_range) || (l-2<l_min) ) { if(babble){ babble=0; printf ("WARNING: out of range for three point interpolation: h,k,l,h0,k0,l0: %g,%g,%g,%d,%d,%d \n", h,k,l,h0,k0,l0); printf("WARNING: further warnings will not be printed! "); } F_cell = default_F; interpolate=0; } } /* only interpolate if it is safe */ if(interpolate){ /* integer versions of nearest HKL indicies */ h_interp[0]=h0_flr-1; h_interp[1]=h0_flr; h_interp[2]=h0_flr+1; h_interp[3]=h0_flr+2; k_interp[0]=k0_flr-1; k_interp[1]=k0_flr; k_interp[2]=k0_flr+1; k_interp[3]=k0_flr+2; l_interp[0]=l0_flr-1; l_interp[1]=l0_flr; l_interp[2]=l0_flr+1; l_interp[3]=l0_flr+2; /* polin function needs doubles */ h_interp_d[0] = (double) h_interp[0]; h_interp_d[1] = (double) h_interp[1]; h_interp_d[2] = (double) h_interp[2]; h_interp_d[3] = (double) h_interp[3]; k_interp_d[0] = (double) k_interp[0]; k_interp_d[1] = (double) k_interp[1]; k_interp_d[2] = (double) k_interp[2]; k_interp_d[3] = (double) k_interp[3]; l_interp_d[0] = (double) l_interp[0]; l_interp_d[1] = (double) l_interp[1]; l_interp_d[2] = (double) l_interp[2]; l_interp_d[3] = (double) l_interp[3]; /* now populate the "y" values (nearest four structure factors in each direction) */ for (i1=0;i1<4;i1++) { for (i2=0;i2<4;i2++) { for (i3=0;i3<4;i3++) { sub_Fhkl[i1][i2][i3]= Fhkl[h_interp[i1]-h_min][k_interp[i2]-k_min][l_interp[i3]-l_min]; } } } /* run the tricubic polynomial interpolation */ polin3(h_interp_d,k_interp_d,l_interp_d,sub_Fhkl,h,k,l,&F_cell); } if(! 
interpolate) { if ( hkls && (h0<=h_max) && (h0>=h_min) && (k0<=k_max) && (k0>=k_min) && (l0<=l_max) && (l0>=l_min) ) { /* just take nearest-neighbor */ F_cell = Fhkl[h0-h_min][k0-k_min][l0-l_min]; } else { F_cell = default_F; // usually zero } } /* now we have the structure factor for this pixel */ /* convert amplitudes into intensity (photons per steradian) */ I += F_cell*F_cell*F_latt*F_latt; /* only do this if we need to */ if(oversample_thick) I *= capture_fraction; if(oversample_polar) I *= polar; if(oversample_omega) I *= omega_pixel; } /* end of mosaic loop */ } /* end of phi loop */ } /* end of source loop */ } /* end of sub-pixel y loop */ } /* end of sub-pixel x loop */ } /* end of detector thickness loop */ /* convert pixel intensity into photon units */ test = r_e_sqr*fluence*I/steps; /* do the corrections now, if they haven't been applied already */ if(! oversample_thick) test *= capture_fraction; if(! oversample_polar) test *= polar; if(! oversample_omega) test *= omega_pixel; floatimage[imgidx] += test; /* now keep track of statistics */ if(floatimage[imgidx] > max_I) { max_I = floatimage[imgidx]; max_I_x = Fdet; max_I_y = Sdet; } sum += floatimage[imgidx]; sumsqr += floatimage[imgidx]*floatimage[imgidx]; ++sumn; if( printout ) { if((fpixel==printout_fpixel && spixel==printout_spixel) || printout_fpixel < 0) { twotheta = atan2(sqrt(pixel_pos[2]*pixel_pos[2]+pixel_pos[3]*pixel_pos[3]),pixel_pos[1]); test = sin(twotheta/2.0)/(lambda0*1e10); printf("%4d %4d : stol = %g or %g\n", fpixel,spixel,stol,test); printf("at %g %g %g\n", pixel_pos[1],pixel_pos[2],pixel_pos[3]); printf("hkl= %f %f %f hkl0= %d %d %d\n", h,k,l,h0,k0,l0); printf(" F_cell=%g F_latt=%g I = %g\n", F_cell,F_latt,I); printf("I/steps %15.10g\n", I/steps); printf("polar %15.10g\n", polar); printf("omega %15.10g\n", omega_pixel); printf("capfrac %15.10g\n", capture_fraction); printf("pixel %15.10g\n", floatimage[imgidx]); printf("real-space cell vectors (Angstrom):\n"); printf(" %-10s %-10s 
%-10s\n","a","b","c"); printf("X: %11.8f %11.8f %11.8f\n",a[1]*1e10,b[1]*1e10,c[1]*1e10); printf("Y: %11.8f %11.8f %11.8f\n",a[2]*1e10,b[2]*1e10,c[2]*1e10); printf("Z: %11.8f %11.8f %11.8f\n",a[3]*1e10,b[3]*1e10,c[3]*1e10); } } else { if(progress_meter && progress_pixels/100 > 0) { if(progress_pixel % ( progress_pixels/20 ) == 0 || ((10*progress_pixel<progress_pixels || 10*progress_pixel>9*progress_pixels) && (progress_pixel % (progress_pixels/100) == 0))) { printf("%lu%% done\n",progress_pixel*100/progress_pixels); fflush(stdout); } } } ++progress_pixel; } } printf("\n"); printf("solid angle subtended by detector = %g steradian ( %g%% sphere)\n",omega_sum/steps,100*omega_sum/steps/4/M_PI); /* do some stats? */ if(sumn<=0) sumn=1; avg = sum/sumn; if(sumn<=1) sumn=2; rms = sqrt(sumsqr/(sumn-1)); sumsqr = 0.0; sumn = 0; for(spixel=0;spixel<spixels;++spixel) { for(fpixel=0;fpixel<fpixels;++fpixel) { /* position in pixel array */ imgidx = spixel*fpixels+fpixel; if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { continue; } test = floatimage[imgidx]-avg; sumsqr += test*test; ++sumn; } } if(sumn<=1) sumn=2; rmsd = sqrt(sumsqr/(sumn-1)); printf("writing %s as %d %lu-byte floats\n",floatfilename,pixels,sizeof(float)); outfile = fopen(floatfilename,"wb"); if(outfile == NULL) { perror("ERROR: fopen"); exit(9); } fwrite(floatimage,sizeof(float),pixels,outfile); fclose(outfile); /* output as ints */ imgidx = 0; printf("max_I = %g at %g %g\n",max_I,max_I_x,max_I_y); printf("mean= %g rms= %g rmsd= %g\n",avg,rms,rmsd); if(intfile_scale <= 0.0){ intfile_scale = 1.0; if(max_I > 0.0) intfile_scale = 55000.0/max_I; } printf("intfile_scale = %g\n",intfile_scale); for(spixel=0;spixel<spixels;++spixel) { for(fpixel=0;fpixel<fpixels;++fpixel) { if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { continue; } /* position in pixel array */ imgidx = spixel*fpixels+fpixel; test = floatimage[imgidx] 
*intfile_scale+adc_offset; if(test > 65535.0) test = 65535.0; if(test < 0.0) test = 0.0; intimage[imgidx] = (unsigned short int) ( floorf(test+0.5) ); // printf("%d %d = %d\n",fpixel,spixel,intimage[imgidx]); } } printf("writing %s as %lu-byte integers\n",intfilename,sizeof(unsigned short int)); outfile = fopen(intfilename,"wb"); if(outfile == NULL) { perror("ERROR: fopen"); exit(9); } fprintf(outfile,"{\nHEADER_BYTES=512;\nDIM=2;\nBYTE_ORDER=%s;\nTYPE=unsigned_short;\n",byte_order); fprintf(outfile,"SIZE1=%d;\nSIZE2=%d;\nPIXEL_SIZE=%g;\nDISTANCE=%g;\n",fpixels,spixels,pixel_size*1000.0,distance*1000.0); fprintf(outfile,"WAVELENGTH=%g;\n",lambda0*1e10); fprintf(outfile,"BEAM_CENTER_X=%g;\nBEAM_CENTER_Y=%g;\n",Xbeam*1000.0,Ybeam*1000); fprintf(outfile,"ADXV_CENTER_X=%g;\nADXV_CENTER_Y=%g;\n",Fbeam*1000.0,(detsize_s-Sbeam)*1000); fprintf(outfile,"MOSFLM_CENTER_X=%g;\nMOSFLM_CENTER_Y=%g;\n",(Sbeam-0.5*pixel_size)*1000.0,(Fbeam-0.5*pixel_size)*1000); fprintf(outfile,"DENZO_X_BEAM=%g;\nDENZO_Y_BEAM=%g;\n",(Sbeam-0.0*pixel_size)*1000.0,(Fbeam-0.0*pixel_size)*1000); fprintf(outfile,"DIALS_ORIGIN=%g,%g,%g\n",dials_origin[1],dials_origin[2],dials_origin[3]); fprintf(outfile,"XDS_ORGX=%g;\nXDS_ORGY=%g;\n",ORGX,ORGY); fprintf(outfile,"CLOSE_DISTANCE=%g;\n",close_distance*1000.0); fprintf(outfile,"PHI=%g;\nOSC_START=%g;\nOSC_RANGE=%g;\n",phi0*RTD,phi0*RTD,osc*RTD); fprintf(outfile,"TWOTHETA=%g;\n",detector_twotheta*RTD); fprintf(outfile,"DETECTOR_SN=000;\n"); fprintf(outfile,"BEAMLINE=fake;\n"); fprintf(outfile,"}\f"); while ( ftell(outfile) < 512 ){ fprintf(outfile," "); }; fwrite(intimage,sizeof(unsigned short int),pixels,outfile); fclose(outfile); if(write_pgm) { /* output as pgm */ imgidx = 0; if(pgm_scale <= 0.0){ pgm_scale = intfile_scale; if(rmsd > 0.0) pgm_scale = 250.0/(5.0*rmsd); } printf("pgm_scale = %g\n",pgm_scale); imgidx = 0; for(spixel=0;spixel<spixels;++spixel) { for(fpixel=0;fpixel<fpixels;++fpixel) { if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < 
roi_ymin || spixel > roi_ymax) { ++imgidx; continue; } test = floatimage[imgidx] * pgm_scale; if(test > 255.0) test = 255.0; pgmimage[imgidx] = (unsigned char) ( test ); // printf("%d %d = %d\n",fpixel,spixel,pgmimage[imgidx]); ++imgidx; } } printf("writing %s as %lu-byte integers\n",pgmfilename,sizeof(unsigned char)); outfile = fopen(pgmfilename,"wb"); if(outfile == NULL) { perror("ERROR: fopen"); exit(9); } fprintf(outfile, "P5\n%d %d\n", fpixels, spixels); fprintf(outfile, "# pixels scaled by %lg\n", pgm_scale); fprintf(outfile, "255\n"); fwrite(pgmimage,sizeof(unsigned char),pixels,outfile); fclose(outfile); } /* quit now if there is nothing else to do */ if(calculate_noise == 0){ return 0; } /* simulate Poisson noise */ imgidx = 0; sum = 0.0; overloads = 0; for(spixel=0;spixel<spixels;++spixel) { for(fpixel=0;fpixel<fpixels;++fpixel) { if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { ++imgidx; continue; } test = poidev( floatimage[imgidx], &seed ); sum += test; test += adc_offset; if(test > 65535.0) { test = 65535.0; ++overloads; } intimage[imgidx] = (unsigned short int) test; // printf("%d %d = %d\n",fpixel,spixel,intimage[imgidx]); ++imgidx; } } printf("%.0f photons on noise image (%d overloads)\n",sum,overloads); printf("writing %s as %lu-byte integers\n",noisefilename,sizeof(unsigned short int)); outfile = fopen(noisefilename,"wb"); if(outfile == NULL) { perror("ERROR: fopen"); exit(9); } fprintf(outfile,"{\nHEADER_BYTES=512;\nDIM=2;\nBYTE_ORDER=%s;\nTYPE=unsigned_short;\n",byte_order); fprintf(outfile,"SIZE1=%d;\nSIZE2=%d;\nPIXEL_SIZE=%g;\nDISTANCE=%g;\n",fpixels,spixels,pixel_size*1000.0,distance*1000.0); fprintf(outfile,"WAVELENGTH=%g;\n",lambda0*1e10); fprintf(outfile,"BEAM_CENTER_X=%g;\nBEAM_CENTER_Y=%g;\n",Xbeam*1000.0,Ybeam*1000); fprintf(outfile,"ADXV_CENTER_X=%g;\nADXV_CENTER_Y=%g;\n",Fbeam*1000.0,(detsize_s-Sbeam)*1000); 
fprintf(outfile,"MOSFLM_CENTER_X=%g;\nMOSFLM_CENTER_Y=%g;\n",(Sbeam-0.5*pixel_size)*1000.0,(Fbeam-0.5*pixel_size)*1000); fprintf(outfile,"DENZO_X_BEAM=%g;\nDENZO_Y_BEAM=%g;\n",(Sbeam+0.0*pixel_size)*1000.0,(Fbeam+0.0*pixel_size)*1000); fprintf(outfile,"DIALS_ORIGIN=%g,%g,%g\n",dials_origin[1],dials_origin[2],dials_origin[3]); fprintf(outfile,"XDS_ORGX=%g;\nXDS_ORGY=%g;\n",ORGX,ORGY); fprintf(outfile,"CLOSE_DISTANCE=%g;\n",close_distance*1000.0); fprintf(outfile,"PHI=%g;\nOSC_START=%g;\nOSC_RANGE=%g;\n",phi0*RTD,phi0*RTD,osc*RTD); fprintf(outfile,"TWOTHETA=%g;\n",detector_twotheta*RTD); fprintf(outfile,"DETECTOR_SN=000;\n"); fprintf(outfile,"BEAMLINE=fake;\n"); fprintf(outfile,"}\f"); while ( ftell(outfile) < 512 ){ fprintf(outfile," "); }; fwrite(intimage,sizeof(unsigned short int),pixels,outfile); fclose(outfile); return 0; } /* Fourier transform of a grating */ double sincg(double x,double N) { if(x==0.0) return N; return sin(x*N)/sin(x); } /* Fourier transform of a sphere */ double sinc3(double x) { if(x==0.0) return 1.0; return 3.0*(sin(x)/x-cos(x))/(x*x); } double sinc_conv_sinc3(double x) { if(x==0.0) return 1.0; return 3.0*(sin(x)-x*cos(x))/(x*x*x); } double *rotate(double *v, double *newv, double phix, double phiy, double phiz) { double rxx,rxy,rxz,ryx,ryy,ryz,rzx,rzy,rzz; double new_x,new_y,new_z,rotated_x,rotated_y,rotated_z; new_x=v[1]; new_y=v[2]; new_z=v[3]; if(phix != 0){ /* rotate around x axis */ //rxx= 1; rxy= 0; rxz= 0; ryx= 0; ryy= cos(phix); ryz=-sin(phix); rzx= 0; rzy= sin(phix); rzz= cos(phix); rotated_x = new_x; rotated_y = new_y*ryy + new_z*ryz; rotated_z = new_y*rzy + new_z*rzz; new_x = rotated_x; new_y = rotated_y; new_z = rotated_z; } if(phiy != 0) { /* rotate around y axis */ rxx= cos(phiy); rxy= 0; rxz= sin(phiy); //ryx= 0; ryy= 1; ryz= 0; rzx=-sin(phiy); rzy= 0; rzz= cos(phiy); rotated_x = new_x*rxx + new_y*rxy + new_z*rxz; rotated_y = new_y; rotated_z = new_x*rzx + new_y*rzy + new_z*rzz; new_x = rotated_x; new_y = rotated_y; new_z = 
rotated_z; } if(phiz != 0){ /* rotate around z axis */ rxx= cos(phiz); rxy=-sin(phiz); rxz= 0; ryx= sin(phiz); ryy= cos(phiz); ryz= 0; //rzx= 0; rzy= 0; rzz= 1; rotated_x = new_x*rxx + new_y*rxy ; rotated_y = new_x*ryx + new_y*ryy; rotated_z = new_z; new_x = rotated_x; new_y = rotated_y; new_z = rotated_z; } newv[1]=new_x; newv[2]=new_y; newv[3]=new_z; return newv; } /* rotate a point about a unit vector axis */ double *rotate_axis(double *v, double *newv, double *axis, double phi) { double sinphi = sin(phi); double cosphi = cos(phi); double dot = (axis[1]*v[1]+axis[2]*v[2]+axis[3]*v[3])*(1.0-cosphi); double temp[4]; temp[1] = axis[1]*dot+v[1]*cosphi+(-axis[3]*v[2]+axis[2]*v[3])*sinphi; temp[2] = axis[2]*dot+v[2]*cosphi+(+axis[3]*v[1]-axis[1]*v[3])*sinphi; temp[3] = axis[3]*dot+v[3]*cosphi+(-axis[2]*v[1]+axis[1]*v[2])*sinphi; newv[1]=temp[1]; newv[2]=temp[2]; newv[3]=temp[3]; return newv; } /* rotate a vector using a 9-element unitary matrix */ double *rotate_umat(double *v, double *newv, double umat[9]) { double uxx,uxy,uxz,uyx,uyy,uyz,uzx,uzy,uzz; /* for convenience, assign matrix x-y coordinate */ uxx = umat[0]; uxy = umat[1]; uxz = umat[2]; uyx = umat[3]; uyy = umat[4]; uyz = umat[5]; uzx = umat[6]; uzy = umat[7]; uzz = umat[8]; /* rotate the vector (x=1,y=2,z=3) */ newv[1] = uxx*v[1] + uxy*v[2] + uxz*v[3]; newv[2] = uyx*v[1] + uyy*v[2] + uyz*v[3]; newv[3] = uzx*v[1] + uzy*v[2] + uzz*v[3]; return newv; } /* returns a unit vector in a random direction in arguments dx,dy,dz */ /* also returns a random magnitude within the unit sphere as a return value */ float uniform3Ddev(float *dx, float *dy, float *dz, long *seed) { float ran1(long *idum); float dr; /* pick a random direction by cutting a sphere out of a cube */ dr = 0; while(dr>1 || dr < 1e-2) { *dx = 2.1*(ran1(seed)-0.5); *dy = 2.1*(ran1(seed)-0.5); *dz = 2.1*(ran1(seed)-0.5); dr = sqrt(*dx**dx+*dy**dy+*dz**dz); } /* turn this into a unit vector */ *dx/=dr; *dy/=dr; *dz/=dr; /* dx,dy,dz should now be a 
random unit vector */ return dr; } /* returns a 9-element unitary matrix for a random isotropic rotation on a spherical cap of diameter "mosaicity" */ /* mosaic = 90 deg is a full sphere */ double *mosaic_rotation_umat(float mosaicity, double umat[9], long *seed) { float ran1(long *idum); double r1,r2,r3,xyrad,rot; double v1,v2,v3; double t1,t2,t3,t6,t7,t8,t9,t11,t12,t15,t19,t20,t24; double uxx,uxy,uxz,uyx,uyy,uyz,uzx,uzy,uzz; /* make three random uniform deviates on [-1:1] */ r1= (double) 2.0*ran1(seed)-1.0; r2= (double) 2.0*ran1(seed)-1.0; r3= (double) 2.0*ran1(seed)-1.0; xyrad = sqrt(1.0-r2*r2); rot = mosaicity*powf((1.0-r3*r3),(1.0/3.0)); v1 = xyrad*sin(M_PI*r1); v2 = xyrad*cos(M_PI*r1); v3 = r2; /* commence incomprehensible quaternion calculation */ t1 = cos(rot); t2 = 1.0 - t1; t3 = v1*v1; t6 = t2*v1; t7 = t6*v2; t8 = sin(rot); t9 = t8*v3; t11 = t6*v3; t12 = t8*v2; t15 = v2*v2; t19 = t2*v2*v3; t20 = t8*v1; t24 = v3*v3; /* populate the unitary rotation matrix */ umat[0] = uxx = t1 + t2*t3; umat[1] = uxy = t7 - t9; umat[2] = uxz = t11 + t12; umat[3] = uyx = t7 + t9; umat[4] = uyy = t1 + t2*t15; umat[5] = uyz = t19 - t20; umat[6] = uzx = t11 - t12; umat[7] = uzy = t19 + t20; umat[8] = uzz = t1 + t2*t24; /* return pointer to the provided array, in case that is useful */ return umat; } /* convert a unitary rotation matrix into misseting angles rotx roty rotz are returned as missets[1] missets[2] missets[3] */ double *umat2misset(double umat[9],double *missets) { double uxx,uxy,uxz,uyx,uyy,uyz,uzx,uzy,uzz; double m,mx,my,mz; double xcy_x,xcy_y,xcy_z; double ycz_x,ycz_y,ycz_z; double zcx_x,zcx_y,zcx_z; double rotx,roty,rotz; uxx=umat[0];uxy=umat[1];uxz=umat[2]; uyx=umat[3];uyy=umat[4];uyz=umat[5]; uzx=umat[6];uzy=umat[7];uzz=umat[8]; /* or transpose? 
*/
    // uxx=umat[1];uyx=umat[2];uzx=umat[3];
    // uxy=umat[4];uyy=umat[5];uzy=umat[6];
    // uxz=umat[7];uyz=umat[8];uzz=umat[9];

    /* make sure it is unitary */
    /* row magnitudes; each nonzero row is scaled to unit length */
    mx = sqrt(uxx*uxx+uxy*uxy+uxz*uxz);
    my = sqrt(uyx*uyx+uyy*uyy+uyz*uyz);
    mz = sqrt(uzx*uzx+uzy*uzy+uzz*uzz);
    if(mx>0){uxx/=mx;uxy/=mx;uxz/=mx;};
    if(my>0){uyx/=my;uyy/=my;uyz/=my;};
    if(mz>0){uzx/=mz;uzy/=mz;uzz/=mz;};

    /* if two rows vanished, rebuild them as identity rows */
    /* NOTE(review): first guard tests mx>=0 (always true for a sqrt result), unlike the
       mx<=0 pattern of the other two — looks like it was meant to be the
       "only the x row survives" case; confirm against upstream nanoBragg */
    if(mx>=0 && my<=0 && mz<=0) { uyx=0;uyy=1;uyz=0; uzx=0;uzy=0;uzz=1; }
    if(mx<=0 && my>=0 && mz<=0) { uxx=1;uxy=0;uxz=0; uzx=0;uzy=0;uzz=1; }
    if(mx<=0 && my<=0 && mz>=0) { uxx=1;uxy=0;uxz=0; uyx=0;uyy=1;uyz=0; }

    /* cross products to check normality */
    xcy_x = uxy*uyz - uxz*uyy;
    xcy_y = uxz*uyx - uxx*uyz;
    xcy_z = uxx*uyy - uxy*uyx;
    m=sqrt(xcy_x*xcy_x+xcy_y*xcy_y+xcy_z*xcy_z);
    if(m>0){xcy_x/=m;xcy_y/=m;xcy_z/=m;};
    ycz_x = uyy*uzz - uyz*uzy;
    ycz_y = uyz*uzx - uyx*uzz;
    ycz_z = uyx*uzy - uyy*uzx;
    m=sqrt(ycz_x*ycz_x+ycz_y*ycz_y+ycz_z*ycz_z);
    if(m>0){ycz_x/=m;ycz_y/=m;ycz_z/=m;};
    zcx_x = uzy*uxz - uzz*uxy;
    zcx_y = uzz*uxx - uzx*uxz;
    zcx_z = uzx*uxy - uzy*uxx;
    m=sqrt(zcx_x*zcx_x+zcx_y*zcx_y+zcx_z*zcx_z);
    if(m>0){zcx_x/=m;zcx_y/=m;zcx_z/=m;};

    /* substitute any empty vectors for cross-product of other two */
    if(mx<=0){uxx=ycz_x;uxy=ycz_y;uxz=ycz_z;};
    if(my<=0){uyx=zcx_x;uyy=zcx_y;uyz=zcx_z;};
    if(mz<=0){uzx=xcy_x;uzy=xcy_y;uzz=xcy_z;};

    /* cross products to check normality (second pass, after the substitutions above) */
    xcy_x = uxy*uyz - uxz*uyy;
    xcy_y = uxz*uyx - uxx*uyz;
    xcy_z = uxx*uyy - uxy*uyx;
    m=sqrt(xcy_x*xcy_x+xcy_y*xcy_y+xcy_z*xcy_z);
    if(m>0){xcy_x/=m;xcy_y/=m;xcy_z/=m;}
    ycz_x = uyy*uzz - uyz*uzy;
    ycz_y = uyz*uzx - uyx*uzz;
    ycz_z = uyx*uzy - uyy*uzx;
    m=sqrt(ycz_x*ycz_x+ycz_y*ycz_y+ycz_z*ycz_z);
    if(m>0){ycz_x/=m;ycz_y/=m;ycz_z/=m;};
    zcx_x = uzy*uxz - uzz*uxy;
    zcx_y = uzz*uxx - uzx*uxz;
    zcx_z = uzx*uxy - uzy*uxx;
    m=sqrt(zcx_x*zcx_x+zcx_y*zcx_y+zcx_z*zcx_z);
    if(m>0){zcx_x/=m;zcx_y/=m;zcx_z/=m;};

    /* substitute any empty vectors for cross-product of other two */
    if(mx<=0){uxx=ycz_x;uxy=ycz_y;uxz=ycz_z;};
    if(my<=0){uyx=zcx_x;uyy=zcx_y;uyz=zcx_z;};
    if(mz<=0){uzx=xcy_x;uzy=xcy_y;uzz=xcy_z;};

    /* make sure it is unitary */
    mx = sqrt(uxx*uxx+uxy*uxy+uxz*uxz);
    my = sqrt(uyx*uyx+uyy*uyy+uyz*uyz);
    mz = sqrt(uzx*uzx+uzy*uzy+uzz*uzz);
    if(mx>0){uxx/=mx;uxy/=mx;uxz/=mx;};
    if(my>0){uyx/=my;uyy/=my;uyz/=my;};
    if(mz>0){uzx/=mz;uzy/=mz;uzz/=mz;};

    /* see if its really orthonormal? */
    /* extract the misset (Euler-style) angles; else branch is the gimbal-lock pole */
    if(uzx*uzx < 1.0) {
        rotx = atan2(uzy,uzz);
        roty = atan2(-uzx,sqrt(uzy*uzy+uzz*uzz));
        rotz = atan2(uyx,uxx);
    } else {
        /* atan2(1,1)*4 == pi, atan2(1,1)*2 == pi/2 */
        rotx = atan2(1,1)*4;
        roty = atan2(1,1)*2;
        rotz = atan2(uxy,-uyy);
    }

    missets[1] = rotx;
    missets[2] = roty;
    missets[3] = rotz;
    return missets;
}


/* Poisson deviate with mean xm (Numerical Recipes style) */
float poidev(float xm, long *idum)
{
    float gammln(float xx);
    float ran1(long *idum);
    /* oldm is a flag for whether xm has changed since last call */
    /* NOTE(review): the static cache makes this function non-reentrant and thread-unsafe */
    static float sq,alxm,g,oldm=(-1.0);
    float em,t,y;

    /* routine below locks up for > 1e6 photons? */
    /* very large mean: Gaussian approximation is adequate */
    if (xm > 1.0e6) {
        return xm+sqrt(xm)*gaussdev(idum);
    }

    if (xm < 12.0) {
        /* use direct method: simulate exponential delays between events */
        if(xm != oldm) {
            /* xm is new, compute the exponential */
            oldm=xm;
            g=exp(-xm);
        }
        /* adding exponential deviates is equivalent to multiplying uniform deviates */
        /* final comparison is to the pre-computed exponential */
        em = -1;
        t = 1.0;
        do {
            ++em;
            t *= ran1(idum);
        } while (t > g);
    } else {
        /* Use rejection method */
        if(xm != oldm) {
            /* xm has changed, pre-compute a few things...
*/ oldm=xm; sq=sqrt(2.0*xm); alxm=log(xm); g=xm*alxm-gammln(xm+1.0); } do { do { /* y is a deviate from a lorentzian comparison function */ y=tan(M_PI*ran1(idum)); /* shift and scale */ em=sq*y+xm; } while (em < 0.0); /* there are no negative Poisson deviates */ /* round off to nearest integer */ em=floor(em); /* ratio of Poisson distribution to comparison function */ /* scale it back by 0.9 to make sure t is never > 1.0 */ t=0.9*(1.0+y*y)*exp(em*alxm-gammln(em+1.0)-g); } while (ran1(idum) > t); } return em; } /* return gaussian deviate with rms=1 and FWHM = 2/sqrt(log(2)) */ float gaussdev(long *idum) { float ran1(long *idum); static int iset=0; static float gset; float fac,rsq,v1,v2; if (iset == 0) { /* no extra deviats handy ... */ /* so pick two uniform deviates on [-1:1] */ do { v1=2.0*ran1(idum)-1.0; v2=2.0*ran1(idum)-1.0; rsq=v1*v1+v2*v2; } while (rsq >= 1.0 || rsq == 0); /* restrained to the unit circle */ /* apply Box-Muller transformation to convert to a normal deviate */ fac=sqrt(-2.0*log(rsq)/rsq); gset=v1*fac; iset=1; /* we now have a spare deviate */ return v2*fac; } else { /* there is an extra deviate in gset */ iset=0; return gset; } } /* generate Lorentzian deviate with FWHM = 2 */ float lorentzdev(long *seed) { float ran1(long *idum); return tan(M_PI*(ran1(seed)-0.5)); } /* return triangular deviate with FWHM = 1 */ float triangledev(long *seed) { float ran1(long *idum); float value; value = ran1(seed); if(value > 0.5){ value = sqrt(2*(value-0.5))-1; }else{ value = 1-sqrt(2*value); } return value; } float expdev(long *idum) { float dum; do dum=ran1(idum); while( dum == 0.0); return -log(dum); } /* ln of the gamma function */ float gammln(float xx) { double x,y,tmp,ser; static double cof[6]={76.18009172947146,-86.50532032941677, 24.01409824083091,-1.231739572450155, 0.1208650973866179e-2,-0.5395239384953e-5}; int j; y=x=xx; tmp=x+5.5; tmp -= (x+0.5)*log(tmp); ser = 1.000000000190015; for(j=0;j<=5;++j) ser += cof[j]/++y; return 
-tmp+log(2.5066282746310005*ser/x); } /* returns a uniform random deviate between 0 and 1 */ #define IA 16807 #define IM 2147483647 #define AM (1.0/IM) #define IQ 127773 #define IR 2836 #define NTAB 32 #define NDIV (1+(IM-1)/NTAB) #define EPS 1.2e-7 #define RNMX (1.0-EPS) float ran1(long *idum) { int j; long k; static long iy=0; static long iv[NTAB]; float temp; if (*idum <= 0 || !iy) { /* first time around. don't want idum=0 */ if(-(*idum) < 1) *idum=1; else *idum = -(*idum); /* load the shuffle table */ for(j=NTAB+7;j>=0;j--) { k=(*idum)/IQ; *idum=IA*(*idum-k*IQ)-IR*k; if(*idum < 0) *idum += IM; if(j < NTAB) iv[j] = *idum; } iy=iv[0]; } /* always start here after initializing */ k=(*idum)/IQ; *idum=IA*(*idum-k*IQ)-IR*k; if (*idum < 0) *idum += IM; j=iy/NDIV; iy=iv[j]; iv[j] = *idum; if((temp=AM*iy) > RNMX) return RNMX; else return temp; } void polint(double *xa, double *ya, double x, double *y) { double x0,x1,x2,x3; x0 = (x-xa[1])*(x-xa[2])*(x-xa[3])*ya[0]/((xa[0]-xa[1])*(xa[0]-xa[2])*(xa[0]-xa[3])); x1 = (x-xa[0])*(x-xa[2])*(x-xa[3])*ya[1]/((xa[1]-xa[0])*(xa[1]-xa[2])*(xa[1]-xa[3])); x2 = (x-xa[0])*(x-xa[1])*(x-xa[3])*ya[2]/((xa[2]-xa[0])*(xa[2]-xa[1])*(xa[2]-xa[3])); x3 = (x-xa[0])*(x-xa[1])*(x-xa[2])*ya[3]/((xa[3]-xa[0])*(xa[3]-xa[1])*(xa[3]-xa[2])); *y = x0+x1+x2+x3; } void polin2(double *x1a, double *x2a, double **ya, double x1, double x2, double *y) { void polint(double *xa, double *ya, double x, double *y); int j; double ymtmp[4]; for (j=1;j<=4;j++) { polint(x2a,ya[j-1],x2,&ymtmp[j-1]); } polint(x1a,ymtmp,x1,y); } void polin3(double *x1a, double *x2a, double *x3a, double ***ya, double x1, double x2, double x3, double *y) { void polint(double *xa, double ya[], double x, double *y); void polin2(double *x1a, double *x2a, double **ya, double x1,double x2, double *y); void polin1(double *x1a, double *ya, double x1, double *y); int j; double ymtmp[4]; for (j=1;j<=4;j++) { polin2(x2a,x3a,&ya[j-1][0],x2,x3,&ymtmp[j-1]); } polint(x1a,ymtmp,x1,y); } /* FWHM = 
integral = 1 */ double ngauss2D(double x,double y) { return log(16.)/M_PI*exp(-log(16.)*(x*x+y*y)); } double ngauss2Dinteg(double x,double y) { return 0.125*(erf(2.*x*sqrt(log(2.)))*erf(y*sqrt(log(16.)))*sqrt(log(16.)/log(2.))); } /* read in multi-column text file to list of double arrays */ /* provide address of undeclared arrays on command line */ size_t read_text_file(char *filename, size_t nargs, ... ) { /* maximum of 10240-character lines? */ char text[10240]; char *token; const char delimiters[] = " \t,;:!"; const char numberstuf[] = "0123456789-+.EGeg"; unsigned long line,lines; unsigned long i,j; double value; double *data; double **pointer; va_list arglist; FILE *infile = NULL; infile = fopen(filename,"r"); if(infile == NULL) { perror("fopen()"); return 0; } lines=0; while ( fgets ( text, sizeof text, infile ) != NULL ) { token = text; token += strspn(token,delimiters); if(strcmp(token,"\n")==0) { //printf("blank\n"); continue; } ++lines; } rewind(infile); /* allocate memory for arrays */ va_start( arglist, nargs); for(i=0;i<nargs;++i){ /* allocate the array */ data = (double*) malloc((lines+10)*sizeof(double)); /* initialize with missing number flags */ for(j=0;j<lines+10;++j) { data[j] = NAN; } /* get argument (pointer to pointer) */ pointer = va_arg(arglist, double **); /* change the value of what the arg points to */ *pointer = data; /* now the pointer provided as an argument points to something */ } va_end(arglist); line = 0; while ( fgets ( text, sizeof text, infile ) != NULL ) { /* read a line */ token = text; token += strspn(token,delimiters); if(strcmp(token,"\n")==0) { //printf("blank\n"); continue; } i=0; va_start( arglist, nargs); do { value=atof(token); /* get argument */ pointer = va_arg(arglist, double **); /* retrieve data array's address */ data = *pointer; data[line] = value; token += strspn(token,numberstuf); if (strcmp(token,"\n")==0) continue; token += strcspn(token,delimiters); token += strspn(token,delimiters); if 
(strcmp(token,"\n")==0) continue; ++i; if(i>=nargs) { break; } } while (strcmp(token,"\n")!=0) ; va_end(arglist); // printf("initializing:"); // va_start( arglist, nargs); // for(i=0;i<nargs;++i){ // pointer = va_arg(arglist, double **); // data = *pointer; // printf(" %g",data[line]); // } // va_end(arglist); // printf("\n"); ++line; } fclose(infile); return lines; } /* measure magnitude of provided vector */ double magnitude(double *vector) { /* measure the magnitude */ vector[0] = sqrt(vector[1]*vector[1]+vector[2]*vector[2]+vector[3]*vector[3]); return vector[0]; } /* make provided vector a unit vector */ double unitize(double *vector, double *new_unit_vector) { double mag; /* measure the magnitude */ mag = magnitude(vector); if(mag != 0.0){ /* normalize it */ new_unit_vector[1]=vector[1]/mag; new_unit_vector[2]=vector[2]/mag; new_unit_vector[3]=vector[3]/mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } return mag; } /* scale magnitude of provided vector */ double vector_scale(double *vector, double *new_vector, double scale) { new_vector[1] = scale*vector[1]; new_vector[2] = scale*vector[2]; new_vector[3] = scale*vector[3]; return magnitude(new_vector); } /* enforce magnitude of provided vector */ double vector_rescale(double *vector, double *new_vector, double new_magnitude) { double oldmag; oldmag = magnitude(vector); if(oldmag <= 0.0) oldmag = 1.0; new_vector[1] = new_magnitude/oldmag*vector[1]; new_vector[2] = new_magnitude/oldmag*vector[2]; new_vector[3] = new_magnitude/oldmag*vector[3]; return magnitude(new_vector); } /* difference between two given vectors */ double vector_diff(double *vector, double *origin_vector, double *new_vector) { new_vector[1] = vector[1]-origin_vector[1]; new_vector[2] = vector[2]-origin_vector[2]; new_vector[3] = vector[3]-origin_vector[3]; return magnitude(new_vector); } /* vector cross product where vector 
magnitude is 0th element */ double *cross_product(double *x, double *y, double *z) { z[1] = x[2]*y[3] - x[3]*y[2]; z[2] = x[3]*y[1] - x[1]*y[3]; z[3] = x[1]*y[2] - x[2]*y[1]; z[0] = 0.0; return z; } /* vector inner product where vector magnitude is 0th element */ double dot_product(double *x, double *y) { return x[1]*y[1]+x[2]*y[2]+x[3]*y[3]; } /* polarization factor */ double polarization_factor(double kahn_factor, double *incident, double *diffracted, double *axis) { double cos2theta,cos2theta_sqr,sin2theta_sqr; double psi=0; double E_in[4]; double B_in[4]; double E_out[4]; double B_out[4]; unitize(incident,incident); unitize(diffracted,diffracted); unitize(axis,axis); /* component of diffracted unit vector along incident beam unit vector */ cos2theta = dot_product(incident,diffracted); cos2theta_sqr = cos2theta*cos2theta; sin2theta_sqr = 1-cos2theta_sqr; if(kahn_factor != 0.0){ /* tricky bit here is deciding which direciton the E-vector lies in for each source here we assume it is closest to the "axis" defined above */ /* cross product to get "vertical" axis that is orthogonal to the cannonical "polarization" */ cross_product(axis,incident,B_in); /* make it a unit vector */ unitize(B_in,B_in); /* cross product with incident beam to get E-vector direction */ cross_product(incident,B_in,E_in); /* make it a unit vector */ unitize(E_in,E_in); /* get components of diffracted ray projected onto the E-B plane */ E_out[0] = dot_product(diffracted,E_in); B_out[0] = dot_product(diffracted,B_in); /* compute the angle of the diffracted ray projected onto the incident E-B plane */ psi = -atan2(B_out[0],E_out[0]); } /* correction for polarized incident beam */ return 0.5*(1.0 + cos2theta_sqr - kahn_factor*cos(2*psi)*sin2theta_sqr); } char *get_byte_order() { static char *byte_order; typedef union { unsigned char string[2]; unsigned short integer; } TWOBYTES; TWOBYTES twobytes; twobytes.integer = 24954; /* determine byte order on this machine */ if(0==strncmp((const char *) 
twobytes.string, "az", 2)) { byte_order = "big_endian"; } else { byte_order = "little_endian"; } return byte_order; } SMVinfo GetFrame(char *filename) { char *string; SMVinfo frame; char *byte_order = get_byte_order(); // unsigned short int tempint; /* try to open the file... */ frame.handle = fopen(filename, "rb"); if(frame.handle != NULL) { /* just assume header will be 512 bytes?... */ frame.header = (char *) calloc(1024,sizeof(char)); if(! fread(frame.header, 512, 1, frame.handle)) { perror("SMV file header"); exit(9); } string = frame.header + 512; *string = (char) 0; /* remember the file name */ frame.filename = (char *) calloc(strlen(filename)+10,sizeof(char)); strcpy(frame.filename,filename); /* What kind of file is this? */ if(0!=strncmp(frame.header, "{\nHEADER_BYTES= 512;\nDIM=2;\nBYTE_ORDER=", 12)) { /* probably not an ADSC frame */ /* inform the user */ printf("ERROR: %s does not look like an ADSC frame!\n", filename); /* skip this file */ fclose(frame.handle); frame.handle = NULL; } else { /* store the full header */ frame.header_size = (int) ValueOf("HEADER_BYTES",frame); if(frame.header_size != 512) { free(frame.header); fseek(frame.handle,0,SEEK_SET); frame.header = (char *) calloc(2*frame.header_size,sizeof(char)); if(! fread(frame.header, frame.header_size, 1, frame.handle)) { perror("SMV file fread"); exit(9); } string = frame.header + frame.header_size; *string = (char) 0; } /* see if we will need to swap bytes */ string = (char *) strstr(frame.header, "BYTE_ORDER=")+11; /* find last instance of keyword in the header */ while ((char *) strstr(string, "BYTE_ORDER=") != NULL) { string = (char *) strstr(string, "BYTE_ORDER=")+11; } if(0==strncmp(byte_order, string, 10)) { frame.swap_bytes = FALSE; } else { frame.swap_bytes = TRUE; } /* store a couple of things */ frame.width = (int) ValueOf("SIZE1",frame); frame.height = (int) ValueOf("SIZE2",frame); if(frame.width == 0) { /* try other formats? 
*/ frame.width = frame.height = (int) ValueOf("DETECTOR_DIMENSIONS",frame); } // frame.mmapdata = mmap(NULL,2*frame.width*frame.height+frame.header_size,PROT_READ,MAP_SHARED,fileno(frame.handle),0); frame.mmapdata = (unsigned short int *) calloc(2,frame.width*frame.height+frame.header_size); if(frame.mmapdata == NULL) { perror("calloc:"); exit(9); } fseek(frame.handle,0,SEEK_SET); printf("reading %s\n",frame.filename); if(! fread(frame.mmapdata,1,2*frame.width*frame.height+frame.header_size,frame.handle)) { perror("SMV file fread"); exit(9); } printf("mmap(%s) = %p\n",frame.filename,frame.mmapdata); } } else { /* fopen() failed */ perror(filename); frame.header_size=0; } return frame; } /* read floating-point values from keywords in an SMV header */ double ValueOf(const char *keyword, SMVinfo frame) { double value; char *string; int keylen = strlen(keyword); /* start at the beginning */ string = frame.header; /* find first instance of keyword in the header */ // string = (char *) strstr(frame.header, keyword); // string = string + keylen; /* find last instance of keyword in the header */ while ((char *) strstr(string, keyword) != NULL) { string = (char *) strstr(string, keyword)+keylen; } if(string == frame.header) return NAN; /* advance to just after the "=" sign */ string = (char *) strstr(string, "="); if(string == NULL) return 0.0; ++string; value = atof(string); return value; } unsigned char *read_pgm5_bytes(char *filename,unsigned int *returned_width,unsigned int *returned_height) { unsigned char test[512]; unsigned char *array = NULL; FILE *handle = NULL; unsigned int width=0,height=0,maxvalue=0; handle = fopen(filename,"rb"); if(handle) { if(! fread(test,512,1,handle)) { perror("PGM fread header"); exit(9); } if(strstr((const char *) test,"P5")) { /* PGM header: "P5<whitespace>width<whitespace>height<whitespace>maxvalue<single whitespace character>" */ fseek(handle,3,SEEK_SET); if(! 
fscanf(handle," %u %u %u",&width,&height,&maxvalue)) { perror("PGM fscanf"); exit(9); } /* skip final single whitespsce character (first pixel could have value of "20") */ fseek(handle,1,SEEK_CUR); array = (unsigned char *) calloc(sizeof(unsigned char),width*height); if(! fread(array,width,height,handle)) { perror("PGM fread"); exit(9); } } fclose(handle); } else { perror("PGM fopen"); } *returned_width = width; *returned_height = height; return array; }
GB_binop__rminus_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fc32) // A*D function (colscale): GB (_AxD__rminus_fc32) // D*A function (rowscale): GB (_DxB__rminus_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fc32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fc32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fc32) // C=scalar+B GB (_bind1st__rminus_fc32) // C=scalar+B' GB (_bind1st_tran__rminus_fc32) // C=A+scalar GB (_bind2nd__rminus_fc32) // C=A'+scalar GB (_bind2nd_tran__rminus_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_minus (bij, aij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical 
#define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_minus (y, x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, 
bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, 
const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_minus (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_minus (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_minus (aij, x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_minus (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_push_relabel.h
/* * Implementation of Goldberg-Tarjan's parallel push-relabel algorithm. Description can be found in * Goldberg, Andrew and Tarjan, Robert, A New Approach to the Maximum-Flow Problem, J. ACM, 1988. * * This implementation is also based on detailed pseudocode presented in * Baumstark, Niklas, Speeding up Maximum Flow Computations on Shared-Memory Platforms, KIT, Karlsruhe, 2014. */ #ifndef MAXFLOW_PARALLEL_PUSH_RELABEL_H #define MAXFLOW_PARALLEL_PUSH_RELABEL_H #include <memory> #include <chrono> #include <iostream> #include <atomic> #include <omp.h> #include <algorithm> #include "../../common_types.h" #include "../../data_structures/queue.h" #include "../../data_structures/thread_local_buffer_pool.h" #ifndef CACHE_LINE_SIZE #define CACHE_LINE_SIZE 64 #endif namespace parallel_push_relabel { template <template <class> typename vector, typename T, typename U> class max_flow_instance { struct alignas (CACHE_LINE_SIZE) vertex { U excess { 0 }; std::atomic<U> new_excess { 0 }; T label; T new_label; std::atomic_flag discovered = ATOMIC_FLAG_INIT; }; vector<vector<cached_edge<T, U>>> & _residual_network; std::unique_ptr<vertex[]> _vertices; std::unique_ptr<T[]> _active { }; data_structures::thread_local_buffer_pool<T> _pool; T _source, _sink, _relabel_threshold, _active_cnt; std::size_t _relabel_progress; const T _thread_count; public: max_flow_instance ( vector<vector<cached_edge<T, U>>> & graph, T source, T sink, std::size_t thread_count = static_cast<size_t>(omp_get_max_threads ()) ) : _residual_network ( graph ), _vertices ( std::make_unique<vertex[]> ( _residual_network . size () ) ), _active ( std::make_unique<T[]> ( _residual_network . size () ) ), _pool ( data_structures::thread_local_buffer_pool<T> ( thread_count, _residual_network . 
size () ) ), _source ( source ), _sink ( sink ), _active_cnt ( 0 ), _relabel_progress ( 0 ), _thread_count ( thread_count ) { omp_set_num_threads ( static_cast<int> ( _thread_count ) ); init (); } uint64_t _phase_cnt = 0; uint64_t _push_cnt = 0; uint64_t _global_update_cnt = 0; U find_max_flow ( ) noexcept { find_max_flow_inner (); #ifdef DEBUG std::cout << "global updates:\t" << _global_update_cnt << std::endl; std::cout << "phase cnt: " << _phase_cnt << std::endl; std::cout << "pushes: " << _push_cnt << std::endl; #endif return _vertices[_sink] . new_excess + _vertices[_sink] . excess; } void preflow_to_flow ( ) { std::swap ( _source, _sink ); find_max_flow_inner (); std::swap ( _source, _sink ); #ifdef DEBUG for ( std::size_t i = 0; i < _residual_network . size(); ++i ) if ( i != _source && i != _sink ) if ( _vertices[i] . excess > 0 ) std::cerr << "Excess violation: vertex " << i << ", excess " << _vertices[i] . excess << '\n'; #endif } auto steal_network ( ) { return std::move ( _residual_network ); } private: static constexpr T ALPHA = 6, BETA = 12; static constexpr double GLOBAL_RELABEL_FREQ = 0.5; void init ( ) noexcept { #pragma omp parallel for schedule(static) for ( std::size_t i = 0; i < _residual_network[_source] . size (); ++i ) { auto & edge = _residual_network[_source][i]; _vertices[edge . dst_vertex] . excess = edge . r_capacity; edge . reverse_r_capacity += edge . r_capacity; _residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += edge . r_capacity; _residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= edge . r_capacity; edge . r_capacity = 0; } T m = 0; for ( std::size_t i = 0; i < _residual_network . size (); ++i ) m += _residual_network[i] . size (); _relabel_threshold = _residual_network . 
size () * ALPHA + m / 2; } void find_max_flow_inner ( ) { global_relabel (); for ( ;; ) { if ( _active_cnt == 0 ) return; ++_phase_cnt; uint64_t push_cnt_per_phase = 0; #pragma omp parallel { #pragma omp for schedule(static) reduction(+:push_cnt_per_phase) for ( T i = 0; i < _active_cnt; ++i ) { auto thr_id = omp_get_thread_num (); auto vertex = _active[i]; if ( _vertices[vertex] . label == _residual_network . size () ) continue; push ( vertex, _vertices[vertex] . label, thr_id, push_cnt_per_phase ); } //stage 2 #pragma omp for schedule(static) reduction(+:_relabel_progress) for ( T i = 0; i < _active_cnt; ++i ) { auto thr_id = omp_get_thread_num (); auto vertex = _active[i]; relabel ( vertex, thr_id, _relabel_progress ); } //stage 3 #pragma omp for schedule(static) for ( T i = 0; i < _active_cnt; ++i ) { auto vertex = _active[i]; _vertices[vertex] . label = _vertices[vertex] . new_label; _vertices[vertex] . discovered . clear ( std::memory_order_relaxed ); } //stage 4 #pragma omp single _active_cnt = _pool . swap_data ( _active ); #pragma omp for schedule(static) for ( T i = 0; i < _active_cnt; ++i ) { auto vertex = _active[i]; _vertices[vertex] . excess += _vertices[vertex] . new_excess . load ( std::memory_order_relaxed ); _vertices[vertex] . new_excess . store ( 0, std::memory_order_relaxed ); _vertices[vertex] . discovered . clear ( std::memory_order_relaxed ); } } if ( _relabel_progress * GLOBAL_RELABEL_FREQ >= _relabel_threshold || push_cnt_per_phase == 0 ) { _relabel_progress = 0; global_relabel (); } _push_cnt += push_cnt_per_phase; } } inline void push ( const T vertex, const T label, int thr_id, uint64_t & push_cnt ) noexcept { const auto target_label = label - 1; for ( auto & edge : _residual_network[vertex] ) { if ( edge . r_capacity > 0 && _vertices[edge . dst_vertex] . label == target_label ) { auto flow = std::min ( _vertices[vertex] . excess, edge . r_capacity ); if ( edge . dst_vertex != _source && edge . 
dst_vertex != _sink ) if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) ) _pool . push_back ( edge . dst_vertex, static_cast<size_t>(thr_id) ); ++push_cnt; _vertices[vertex] . excess -= flow; _vertices[edge . dst_vertex] . new_excess . fetch_add ( flow, std::memory_order_relaxed ); edge . r_capacity -= flow; edge . reverse_r_capacity += flow; _residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= flow; _residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += flow; if ( _vertices[vertex] . excess == 0 ) return; } } } inline void relabel ( const T vertex, const int thr_id, std::size_t & relabel_progress ) noexcept { if ( _vertices[vertex] . excess > 0 || _vertices[vertex] . label == _residual_network . size () ) { relabel_progress += BETA; _vertices[vertex] . new_label = calculate_new_label ( vertex ); relabel_progress += _residual_network[vertex] . size (); if ( _vertices[vertex] . new_label == _residual_network . size () ) { _vertices[vertex] . excess += _vertices[vertex] . new_excess; _vertices[vertex] . new_excess = 0; return; } if ( !_vertices[vertex] . discovered . test_and_set ( std::memory_order_relaxed ) ) _pool . push_back ( vertex, static_cast<size_t>(thr_id) ); } else _vertices[vertex] . new_label = _vertices[vertex] . label; } inline T calculate_new_label ( const T vertex ) noexcept { T increase_to = _residual_network . size () - 1; for ( auto & edge : _residual_network[vertex] ) { if ( edge . r_capacity == 0 ) continue; increase_to = std::min ( increase_to, _vertices[edge . dst_vertex] . label ); } return increase_to + 1; } void global_relabel ( ) noexcept { ++_global_update_cnt; const auto not_reached = _residual_network . size (); #pragma omp parallel for schedule(static) for ( std::size_t i = 0; i < _residual_network . size (); ++i ) _vertices[i] . label = not_reached; _vertices[_sink] . label = 0; _vertices[_sink] . discovered . 
test_and_set (); assert ( _pool . empty () ); _active[0] = _sink; std::size_t current_queue_size = 1; T current_distance = 0; while ( current_queue_size > 0 ) { #pragma omp parallel for schedule(static) for ( std::size_t i = 0; i < current_queue_size; ++i ) { auto thr_id = omp_get_thread_num (); auto current_vertex = _active[i]; for ( auto edge : _residual_network[current_vertex] ) { if ( edge . reverse_r_capacity > 0 ) { if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) ) { _vertices[edge . dst_vertex] . label = current_distance + 1; _pool . push_back ( edge . dst_vertex, static_cast<std::size_t>(thr_id) ); } } } } current_queue_size = _pool . swap_data ( _active ); ++current_distance; } #pragma omp parallel for schedule(static) for ( std::size_t i = 0; i < _residual_network . size (); ++i ) { auto thr_id = omp_get_thread_num (); if ( _vertices[i] . label != not_reached && _vertices[i] . excess > 0 && i != _sink ) _pool . push_back ( i, static_cast<size_t>(thr_id) ); _vertices[i] . discovered . clear ( std::memory_order_relaxed ); } _active_cnt = _pool . swap_data ( _active ); } }; } #endif //MAXFLOW_PARALLEL_PUSH_RELABEL_H
phonon.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include "phonon.h"

#include <math.h>
#include <stddef.h>
#include <string.h>

#include "dynmat.h"
#include "lapack_wrapper.h"

/* Gather grid points whose phonons have not been computed yet, marking them
 * done as they are collected.  Returns the number of collected points. */
static long collect_undone_grid_points(long *undone, char *phonon_done,
                                       const long num_grid_points,
                                       const long *grid_points);
/* Build dynamical matrices (Wang-style NAC when `born` is given) and
 * diagonalize them for all undone grid points. */
static void get_undone_phonons(
    double *frequencies, lapack_complex_double *eigenvectors,
    const long *undone_grid_points, const long num_undone_grid_points,
    const long (*grid_address)[3], const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const long num_patom, const long num_satom, const double *masses_fc2,
    const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor, const char uplo);
/* Same as get_undone_phonons but with the Gonze-Lee dipole-dipole
 * (Ewald-sum based) non-analytical correction. */
static void get_gonze_undone_phonons(
    double *frequencies, lapack_complex_double *eigenvectors,
    const long *undone_grid_points, const long num_undone_grid_points,
    const long (*grid_address)[3], const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const double (*positions)[3], const long num_patom, const long num_satom,
    const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor, const double *dd_q0,
    const double (*G_list)[3], const long num_G_points, const double lambda,
    const char uplo);
/* Store the dynamical matrix at q in `eigvecs` (diagonalization happens in
 * the caller). */
static void get_phonons(lapack_complex_double *eigvecs, const double q[3],
                        const double *fc2, const double *masses,
                        const long *p2s, const long *s2p,
                        const long (*multi)[2], const long num_patom,
                        const long num_satom, const double (*svecs)[3],
                        const long is_nac, const double (*born)[3][3],
                        const double dielectric[3][3],
                        const double reciprocal_lattice[3][3],
                        const double *q_direction, const double nac_factor,
                        const double unit_conversion_factor);
/* As get_phonons, adding the mass-weighted reciprocal dipole-dipole term. */
static void get_gonze_phonons(
    lapack_complex_double *eigvecs, const double q[3], const double *fc2,
    const double *masses, const long *p2s, const long *s2p,
    const long (*multi)[2], const double (*positions)[3], const long num_patom,
    const long num_satom, const double (*svecs)[3], const long is_nac,
    const double (*born)[3][3], const double dielectric[3][3],
    const double reciprocal_lattice[3][3], const double *q_direction,
    const double nac_factor, const double *dd_q0, const double (*G_list)[3],
    const long num_G_points, const double lambda);
static void get_dynamical_matrix(
    lapack_complex_double *dynmat, const double q[3], const double *fc2,
    const double *masses, const long *p2s, const long *s2p,
    const long (*multi)[2], const long num_patom, const long num_satom,
    const double (*svecs)[3], const long is_nac,
    const double (*born)[3][3], /* Wang NAC unless NULL */
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor);
static void get_charge_sum(double (*charge_sum)[3][3], const long num_patom,
                           const long num_satom, const double q[3],
                           const double (*born)[3][3],
                           const double dielectric[3][3],
                           const double reciprocal_lattice[3][3],
                           const double *q_direction, const double nac_factor);
/* Decide whether the NAC term applies at this grid point: only with Born
 * charges, and at Gamma only when a q-direction for the limit is given. */
static long needs_nac(const double (*born)[3][3],
                      const long (*grid_address)[3], const long gp,
                      const double *q_direction);

/* Compute phonon frequencies and eigenvectors at all not-yet-done grid
 * points using the Wang-style NAC dynamical matrix. */
void phn_get_phonons_at_gridpoints(
    double *frequencies, lapack_complex_double *eigenvectors,
    char *phonon_done, const long num_phonons, const long *grid_points,
    const long num_grid_points, const long (*grid_address)[3],
    const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const long num_patom, const long num_satom, const double *masses_fc2,
    const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, /* must be pointer */
    const double nac_factor, const char uplo) {
    long num_undone;
    long *undone;

    /* NOTE(review): malloc result is not checked; on allocation failure the
     * call below would dereference NULL. */
    undone = (long *)malloc(sizeof(long) * num_phonons);
    num_undone = collect_undone_grid_points(undone, phonon_done,
                                            num_grid_points, grid_points);

    get_undone_phonons(frequencies, eigenvectors, undone, num_undone,
                       grid_address, QDinv, fc2, svecs_fc2, multi_fc2,
                       num_patom, num_satom, masses_fc2, p2s_fc2, s2p_fc2,
                       unit_conversion_factor, born, dielectric,
                       reciprocal_lattice, q_direction, nac_factor, uplo);

    free(undone);
    undone = NULL;
}

/* Same as phn_get_phonons_at_gridpoints but with the Gonze-Lee
 * dipole-dipole non-analytical correction (dd_q0, G_list, lambda). */
void phn_get_gonze_phonons_at_gridpoints(
    double *frequencies, lapack_complex_double *eigenvectors,
    char *phonon_done, const long num_phonons, const long *grid_points,
    const long num_grid_points, const long (*grid_address)[3],
    const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const double (*positions)[3], const long num_patom, const long num_satom,
    const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, /* pointer */
    const double nac_factor, const double *dd_q0, const double (*G_list)[3],
    const long num_G_points, const double lambda, const char uplo) {
    long num_undone;
    long *undone;

    /* NOTE(review): malloc result is not checked (see above). */
    undone = (long *)malloc(sizeof(long) * num_phonons);
    num_undone = collect_undone_grid_points(undone, phonon_done,
                                            num_grid_points, grid_points);

    get_gonze_undone_phonons(
        frequencies, eigenvectors, undone, num_undone, grid_address, QDinv,
        fc2, svecs_fc2, multi_fc2, positions, num_patom, num_satom,
        masses_fc2, p2s_fc2, s2p_fc2, unit_conversion_factor, born,
        dielectric, reciprocal_lattice, q_direction, nac_factor, dd_q0,
        G_list, num_G_points, lambda, uplo);

    free(undone);
    undone = NULL;
}

/* Collect not-yet-done grid points into `undone`, marking each as done so
 * the same point is never computed twice.  Returns how many were collected. */
static long collect_undone_grid_points(long *undone, char *phonon_done,
                                       const long num_grid_points,
                                       const long *grid_points) {
    long i, gp, num_undone;

    num_undone = 0;
    for (i = 0; i < num_grid_points; i++) {
        gp = grid_points[i];
        if (phonon_done[gp] == 0) {
            undone[num_undone] = gp;
            num_undone++;
            phonon_done[gp] = 1;
        }
    }

    return num_undone;
}

/* Two-pass computation: first build all dynamical matrices in parallel
 * (stored in the eigenvector array), then diagonalize them.  Frequencies are
 * sign(eigenvalue) * sqrt(|eigenvalue|) * unit_conversion_factor. */
static void get_undone_phonons(
    double *frequencies, lapack_complex_double *eigenvectors,
    const long *undone_grid_points, const long num_undone_grid_points,
    const long (*grid_address)[3], const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const long num_patom, const long num_satom, const double *masses_fc2,
    const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor, const char uplo) {
    long i, j, gp, num_band;
    long is_nac, info;
    double q[3];
    double *freqs_tmp;

    num_band = num_patom * 3;

#ifdef _OPENMP
#pragma omp parallel for private(j, q, gp, is_nac)
#endif
    for (i = 0; i < num_undone_grid_points; i++) {
        gp = undone_grid_points[i];
        /* q (fractional) = QDinv * grid_address[gp] */
        for (j = 0; j < 3; j++) {
            q[j] = QDinv[j][0] * grid_address[gp][0] +
                   QDinv[j][1] * grid_address[gp][1] +
                   QDinv[j][2] * grid_address[gp][2];
        }
        is_nac = needs_nac(born, grid_address, gp, q_direction);
        get_phonons(eigenvectors + num_band * num_band * gp, q, fc2,
                    masses_fc2, p2s_fc2, s2p_fc2, multi_fc2, num_patom,
                    num_satom, svecs_fc2, is_nac, born, dielectric,
                    reciprocal_lattice, q_direction, nac_factor,
                    unit_conversion_factor);
    }

/* To avoid multithreaded BLAS in OpenMP loop */
#ifdef _OPENMP
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, gp, freqs_tmp, info)
#endif
#endif
    for (i = 0; i < num_undone_grid_points; i++) {
        gp = undone_grid_points[i];
        freqs_tmp = frequencies + num_band * gp;
        /* Store eigenvalues in freqs array. */
        /* Eigenvectors are overwritten on eigvecs array. */
        /* NOTE(review): the LAPACK return status `info` is never inspected;
         * a failed diagonalization goes unnoticed. */
        info = phonopy_zheev(freqs_tmp, eigenvectors + num_band * num_band * gp,
                             num_band, uplo);
        /* Sqrt of eigenvalues are re-stored in freqs array.*/
        for (j = 0; j < num_band; j++) {
            freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) *
                           ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) *
                           unit_conversion_factor;
        }
    }
}

/* Same two-pass structure as get_undone_phonons, but the dynamical matrix
 * includes the Gonze-Lee reciprocal dipole-dipole term. */
static void get_gonze_undone_phonons(
    double *frequencies, lapack_complex_double *eigenvectors,
    const long *undone_grid_points, const long num_undone_grid_points,
    const long (*grid_address)[3], const double QDinv[3][3], const double *fc2,
    const double (*svecs_fc2)[3], const long (*multi_fc2)[2],
    const double (*positions)[3], const long num_patom, const long num_satom,
    const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2,
    const double unit_conversion_factor, const double (*born)[3][3],
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor, const double *dd_q0,
    const double (*G_list)[3], const long num_G_points, const double lambda,
    const char uplo) {
    long i, j, gp, num_band;
    long is_nac, info;
    double q[3];
    double *freqs_tmp;

    num_band = num_patom * 3;

#ifdef _OPENMP
#pragma omp parallel for private(j, q, gp, is_nac)
#endif
    for (i = 0; i < num_undone_grid_points; i++) {
        gp = undone_grid_points[i];
        /* q (fractional) = QDinv * grid_address[gp] */
        for (j = 0; j < 3; j++) {
            q[j] = QDinv[j][0] * grid_address[gp][0] +
                   QDinv[j][1] * grid_address[gp][1] +
                   QDinv[j][2] * grid_address[gp][2];
        }
        is_nac = needs_nac(born, grid_address, gp, q_direction);
        get_gonze_phonons(eigenvectors + num_band * num_band * gp, q, fc2,
                          masses_fc2, p2s_fc2, s2p_fc2, multi_fc2, positions,
                          num_patom, num_satom, svecs_fc2, is_nac, born,
                          dielectric, reciprocal_lattice, q_direction,
                          nac_factor, dd_q0, G_list, num_G_points, lambda);
    }

/* To avoid multithreaded BLAS in OpenMP loop */
#ifdef _OPENMP
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, gp, freqs_tmp, info)
#endif
#endif
    for (i = 0; i < num_undone_grid_points; i++) {
        gp = undone_grid_points[i];
        /* Store eigenvalues in freqs array. */
        /* Eigenvectors are overwritten on eigvecs array. */
        freqs_tmp = frequencies + num_band * gp;
        /* NOTE(review): `info` from phonopy_zheev is never inspected. */
        info = phonopy_zheev(freqs_tmp, eigenvectors + num_band * num_band * gp,
                             num_band, uplo);
        /* Sqrt of eigenvalues are re-stored in freqs array.*/
        for (j = 0; j < num_band; j++) {
            freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) *
                           ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) *
                           unit_conversion_factor;
        }
    }
}

/* Thin wrapper: write the (optionally NAC-corrected) dynamical matrix at q
 * into `eigvecs`; diagonalization is done later by the caller. */
static void get_phonons(lapack_complex_double *eigvecs, const double q[3],
                        const double *fc2, const double *masses,
                        const long *p2s, const long *s2p,
                        const long (*multi)[2], const long num_patom,
                        const long num_satom, const double (*svecs)[3],
                        const long is_nac, const double (*born)[3][3],
                        const double dielectric[3][3],
                        const double reciprocal_lattice[3][3],
                        const double *q_direction, const double nac_factor,
                        const double unit_conversion_factor) {
    /* Store dynamical matrix in eigvecs array. */
    get_dynamical_matrix(eigvecs, q, fc2, masses, p2s, s2p, multi, num_patom,
                         num_satom, svecs, is_nac, born, dielectric,
                         reciprocal_lattice, q_direction, nac_factor);
}

/* Build the short-range dynamical matrix, then add the mass-weighted
 * reciprocal-space dipole-dipole contribution (Gonze-Lee NAC). */
static void get_gonze_phonons(
    lapack_complex_double *eigvecs, const double q[3], const double *fc2,
    const double *masses, const long *p2s, const long *s2p,
    const long (*multi)[2], const double (*positions)[3], const long num_patom,
    const long num_satom, const double (*svecs)[3], const long is_nac,
    const double (*born)[3][3], const double dielectric[3][3],
    const double reciprocal_lattice[3][3], const double *q_direction,
    const double nac_factor, const double *dd_q0, const double (*G_list)[3],
    const long num_G_points, const double lambda) {
    long i, j, k, l, adrs, num_band;
    double mm;
    double q_cart[3];
    double *q_dir_cart;
    lapack_complex_double *dd;

    dd = NULL;
    q_dir_cart = NULL;
    num_band = num_patom * 3;

    /* Short-range part (no charge_sum: NAC is added below instead). */
    dym_get_dynamical_matrix_at_q((double *)eigvecs, num_patom, num_satom, fc2,
                                  q, svecs, multi, masses, s2p, p2s, NULL, 0);

    /* NOTE(review): malloc results below are not checked. */
    dd = (lapack_complex_double *)malloc(sizeof(lapack_complex_double) *
                                         num_band * num_band);

    /* q (Cartesian) = reciprocal_lattice * q (fractional) */
    for (i = 0; i < 3; i++) {
        q_cart[i] = 0;
        for (j = 0; j < 3; j++) {
            q_cart[i] += reciprocal_lattice[i][j] * q[j];
        }
    }

    if (q_direction) {
        q_dir_cart = (double *)malloc(sizeof(double) * 3);
        for (i = 0; i < 3; i++) {
            q_dir_cart[i] = 0;
            for (j = 0; j < 3; j++) {
                q_dir_cart[i] += reciprocal_lattice[i][j] * q_direction[j];
            }
        }
    }

    dym_get_recip_dipole_dipole((double *)dd, dd_q0, G_list, num_G_points,
                                num_patom, q_cart, q_dir_cart, born,
                                dielectric, positions, nac_factor, lambda,
                                1e-5);

    if (q_direction) {
        free(q_dir_cart);
        q_dir_cart = NULL;
    }

    /* D += dd / sqrt(m_i * m_j), element-wise over 3x3 atom blocks. */
    for (i = 0; i < num_patom; i++) {
        for (j = 0; j < num_patom; j++) {
            mm = sqrt(masses[i] * masses[j]);
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
                    eigvecs[adrs] = lapack_make_complex_double(
                        lapack_complex_double_real(eigvecs[adrs]) +
                            lapack_complex_double_real(dd[adrs]) / mm,
                        lapack_complex_double_imag(eigvecs[adrs]) +
                            lapack_complex_double_imag(dd[adrs]) / mm);
                }
            }
        }
    }

    free(dd);
    dd = NULL;
}

/* Dynamical matrix at q; when is_nac, the Wang-style charge sum is built
 * first and folded in by dym_get_dynamical_matrix_at_q. */
static void get_dynamical_matrix(
    lapack_complex_double *dynmat, const double q[3], const double *fc2,
    const double *masses, const long *p2s, const long *s2p,
    const long (*multi)[2], const long num_patom, const long num_satom,
    const double (*svecs)[3], const long is_nac,
    const double (*born)[3][3], /* Wang NAC unless NULL */
    const double dielectric[3][3], const double reciprocal_lattice[3][3],
    const double *q_direction, const double nac_factor) {
    double(*charge_sum)[3][3];

    charge_sum = NULL;

    if (is_nac) {
        /* NOTE(review): sizeof(double[3][3]) already accounts for the 3x3
         * block, so the extra "* 9" over-allocates by a factor of nine --
         * wasteful but not incorrect.  Also unchecked malloc. */
        charge_sum = (double(*)[3][3])malloc(sizeof(double[3][3]) * num_patom *
                                             num_patom * 9);
        get_charge_sum(charge_sum, num_patom, num_satom, q, born, dielectric,
                       reciprocal_lattice, q_direction, nac_factor);
    }

    dym_get_dynamical_matrix_at_q((double *)dynmat, num_patom, num_satom, fc2,
                                  q, svecs, multi, masses, s2p, p2s,
                                  charge_sum, 0);
    if (is_nac) {
        free(charge_sum);
        charge_sum = NULL;
    }
}

/* Wang-style NAC charge sum: Z.q outer products scaled by
 * nac_factor / (q.eps.q) / N, where N = num_satom / num_patom. */
static void get_charge_sum(double (*charge_sum)[3][3], const long num_patom,
                           const long num_satom, const double q[3],
                           const double (*born)[3][3],
                           const double dielectric[3][3],
                           const double reciprocal_lattice[3][3],
                           const double *q_direction,
                           const double nac_factor) {
    long i, j;
    double inv_dielectric_factor, dielectric_factor, tmp_val;
    double q_cart[3];

    /* Use the explicit q-direction at Gamma, otherwise q itself,
     * both converted to Cartesian coordinates. */
    if (q_direction) {
        for (i = 0; i < 3; i++) {
            q_cart[i] = 0.0;
            for (j = 0; j < 3; j++) {
                q_cart[i] += reciprocal_lattice[i][j] * q_direction[j];
            }
        }
    } else {
        for (i = 0; i < 3; i++) {
            q_cart[i] = 0.0;
            for (j = 0; j < 3; j++) {
                q_cart[i] += reciprocal_lattice[i][j] * q[j];
            }
        }
    }

    /* q . dielectric . q */
    inv_dielectric_factor = 0.0;
    for (i = 0; i < 3; i++) {
        tmp_val = 0.0;
        for (j = 0; j < 3; j++) {
            tmp_val += dielectric[i][j] * q_cart[j];
        }
        inv_dielectric_factor += tmp_val * q_cart[i];
    }
    /* N = num_satom / num_patom = number of prim-cell in supercell */
    /* N is used for Wang's method. */
    dielectric_factor =
        nac_factor / inv_dielectric_factor / num_satom * num_patom;
    dym_get_charge_sum(charge_sum, num_patom, dielectric_factor, q_cart, born);
}

/* NAC applies only when Born charges exist; at the Gamma point it applies
 * only when an explicit q-direction for the non-analytical limit is given. */
static long needs_nac(const double (*born)[3][3],
                      const long (*grid_address)[3], const long gp,
                      const double *q_direction) {
    long is_nac;

    if (born) {
        if (grid_address[gp][0] == 0 && grid_address[gp][1] == 0 &&
            grid_address[gp][2] == 0 && q_direction == NULL) {
            is_nac = 0;
        } else {
            is_nac = 1;
        }
    } else {
        is_nac = 0;
    }

    return is_nac;
}
im2col_dnnlowp.h
#pragma once #ifdef _OPENMP #include <omp.h> #endif #include "caffe2/core/operator.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace math { template <typename T> static void Im2ColNCHW( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const T* data_im, T* data_col, CPUContext* /*context*/, const T& zero_point = 0) { const int output_h = (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int output_w = (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; // Fast path for zero padding and no dilation // From Torch, THNN_(unfolded_copy) if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 && pad_t == 0 && pad_b == 0) { for (auto k = 0; k < channels * kernel_h * kernel_w; k++) { const auto nip = k / (kernel_h * kernel_w); const auto rest = k % (kernel_h * kernel_w); const auto kh = rest / kernel_w; const auto kw = rest % kernel_w; auto* dst = data_col + nip * (kernel_h * kernel_w * output_h * output_w) + kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w); const auto* src = data_im + nip * (height * width); for (auto y = 0; y < output_h; y++) { const auto iy = y * stride_h + kh; const auto ix = kw; if (stride_w == 1) { memcpy( dst + (y * output_w), src + (iy * width + ix), sizeof(T) * output_w); } else { for (auto x = 0; x < output_w; x++) { memcpy( dst + (y * output_w + x), src + (iy * width + ix + x * stride_w), sizeof(T)); } } } } return; } // Fast path for equal padding if (pad_l == pad_r && pad_t == pad_b) { // From Intel, https://github.com/BVLC/caffe/pull/3536 const int pad_h = pad_t; const int pad_w = pad_l; const int channel_size = height * width; for (int channel = channels; channel--; data_im += channel_size) { for (int kernel_row = 
0; kernel_row < kernel_h; kernel_row++) { for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) { int input_row = -pad_h + kernel_row * dilation_h; for (int output_rows = output_h; output_rows; output_rows--) { if (!utils::IsAGeZeroAndALtB(input_row, height)) { for (int output_cols = output_w; output_cols; output_cols--) { *(data_col++) = zero_point; } } else { int input_col = -pad_w + kernel_col * dilation_w; for (int output_col = output_w; output_col; output_col--) { if (utils::IsAGeZeroAndALtB(input_col, width)) { *(data_col++) = data_im[input_row * width + input_col]; } else { *(data_col++) = zero_point; } input_col += stride_w; } } input_row += stride_h; } } } } return; } // Baseline const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int channels_col = channels * kernel_h * kernel_w; for (int c = 0; c < channels_col; ++c) { int w_offset = c % kernel_w; int h_offset = (c / kernel_w) % kernel_h; int c_im = c / kernel_h / kernel_w; for (int h = 0; h < height_col; ++h) { for (int w = 0; w < width_col; ++w) { int h_pad = h * stride_h - pad_t + h_offset * dilation_h; int w_pad = w * stride_w - pad_l + w_offset * dilation_w; if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) data_col[(c * height_col + h) * width_col + w] = data_im[(c_im * height + h_pad) * width + w_pad]; else data_col[(c * height_col + h) * width_col + w] = zero_point; } } } } template <typename T> static void Im2ColNdNCHW( const int N, const int /* img_size*/, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const T* X_data, T* Y_data, CPUContext* /* context */, const T& zero_point = 0) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int 
kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); std::vector<int> d_offset(N, 0); std::vector<int> d_iter(N, 0); for (int i = 0; i < outer_size; ++i) { // Loop over spatial axes in reverse order to compute a per-axis offset. int offset = i; for (int d_i = N - 1; d_i >= 0; --d_i) { d_offset[d_i] = offset % kernel_shape[d_i]; offset /= kernel_shape[d_i]; } for (int j = 0; j < inner_size; ++j) { // Loop over spatial axes in forward order to compute the indices in the // image and column, and whether the index lies in the padding. const int col_index = i * inner_size + j; int img_index = i / kernel_size; bool is_padding = false; for (int d_i = 0; d_i < N; ++d_i) { const int d_img = d_iter[d_i] * stride[d_i] - pad[d_i] + d_offset[d_i] * dilation[d_i]; is_padding |= d_img < 0 || d_img >= img_shape[d_i + 1]; img_index = img_index * img_shape[d_i + 1] + d_img; } Y_data[col_index] = is_padding ? zero_point : X_data[img_index]; utils::IncreaseIndexInDims(N, col_shape + 1, d_iter.data()); } } } /** * The layout of the result is N H W G R S C/G. * Note that groups are pulled out to an outer dimension so that we can use * GEMMs efficiently. 
*/ template <typename T> static void Im2ColNHWC( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const T* data_im, T* data_col, CPUContext* /*context*/, const int groups, const T& zero_point) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; #ifdef _OPENMP #pragma omp parallel for if (!omp_in_parallel()) #endif for (int h = 0; h < height_col; ++h) { int h_pad = -pad_t + h * stride_h; T* data_col_temp = data_col + h * width_col * kernel_h * kernel_w * channels; int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { int r = 0; for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) { int s = 0; for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) { if (ih >= 0 && ih < height && iw >= 0 && iw < width) { for (int g = 0; g < groups; ++g) { memcpy( data_col_temp + ((g * kernel_h + r) * kernel_w + s) * (channels / groups), data_im + (ih * width + iw) * channels + g * (channels / groups), sizeof(T) * (channels / groups)); } } else { // This should be simply padded with zero. for (int g = 0; g < groups; ++g) { for (int i = 0; i < channels / groups; ++i) { data_col_temp [(((g * kernel_h + r) * kernel_w) + s) * (channels / groups) + i] = zero_point; } } } } // for each iw } // for each ih data_col_temp += kernel_h * kernel_w * channels; w_pad += stride_w; } // for each output pixel } // for each image row } /** * The layout of the result is N T H W G Q R S C/G. * Note that groups are pulled out to an outer dimension so that we can use * GEMMs efficiently. 
*/ template <typename T> static void Im2Col3DNHWC( const int channels, const int num_frames, const int height, const int width, const int kernel_t, const int kernel_h, const int kernel_w, const int dilation_t, const int dilation_h, const int dilation_w, const int pad_p, // previous frame const int pad_t, // top const int pad_l, // left const int pad_n, // next frame const int pad_b, // bottom const int pad_r, // right const int stride_t, const int stride_h, const int stride_w, const T* data_im, T* data_col, CPUContext* /*context*/, const int groups, const T& zero_point) { const int dkernel_t = dilation_t * (kernel_t - 1) + 1; const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int frame_col = (num_frames + pad_p + pad_n - dkernel_t) / stride_t + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; #ifdef _OPENMP #pragma omp parallel for if (!omp_in_parallel()) #endif for (int t = 0; t < frame_col; ++t) { int t_pad = -pad_p + t * stride_t; for (int h = 0; h < height_col; ++h) { int h_pad = -pad_t + h * stride_h; T* data_col_temp = data_col + (t * height_col + h) * width_col * kernel_t * kernel_h * kernel_w * channels; for (int w = 0; w < width_col; ++w) { int w_pad = -pad_l + w * stride_w; int q = 0; for (int it = t_pad; it < t_pad + dkernel_t; it += dilation_t, ++q) { int r = 0; for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) { int s = 0; for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) { if (it >= 0 && it < num_frames && ih >= 0 && ih < height && iw >= 0 && iw < width) { for (int g = 0; g < groups; ++g) { memcpy( data_col_temp + (((g * kernel_t + q) * kernel_h + r) * kernel_w + s) * (channels / groups), data_im + ((it * height + ih) * width + iw) * channels + g * (channels / groups), sizeof(T) * (channels / groups)); } } else { // This should be simply padded with zero. 
for (int g = 0; g < groups; ++g) { for (int i = 0; i < channels / groups; ++i) { data_col_temp [((((g * kernel_t + q) * kernel_h + r) * kernel_w) + s) * (channels / groups) + i] = zero_point; } } } } // for each iw } // for each ih } // for each it data_col_temp += kernel_t * kernel_h * kernel_w * channels; } // for each output pixel } // for each image row } // for each frame } } // namespace math } // namespace caffe2
gemm.c
#include "gemm.h"
#include "utils.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <pthread.h>
#include "Enclave_u.h"
#include "sgx_err.h"
#include "sys/sysinfo.h"

/* Number of logical CPUs; initialized lazily by gemm()/gemm_segmentation()/
** gemm_cpu(). */
int num_cpu;

#ifdef OPENMP
#include <omp.h>
#endif

/* Argument bundle for one enclave GEMM worker thread. */
typedef struct gemm_thread_data {
    int TA, TB, M, N, K;
    float ALPHA;
    float *A;
    int lda;
    float *B;
    int ldb;
    float BETA;
    float *C;
    int ldc;
} gemm_thread_data;

extern sgx_enclave_id_t EID;

/* OpenMP thread count used by the gemm kernels: half the CPUs, but never
** less than 1 — num_threads(0) is invalid OpenMP. */
static inline int gemm_omp_threads(void)
{
    return num_cpu > 1 ? num_cpu / 2 : 1;
}

/*
** pthread start routine: performs one GEMM slice inside the SGX enclave.
** ptr points to a gemm_thread_data that must stay alive until this thread
** is joined. SGX failures are reported via print_error_message().
**
** Fix: this was declared `void thread_gemm(void *)` and contained
** `return -1;` (invalid in a void function); a pthread start routine must
** have type void *(*)(void *).
*/
void *thread_gemm(void *ptr)
{
    gemm_thread_data *data = (gemm_thread_data *)ptr;
    /* The ecall marshals the matrix pointers by reference (float **). */
    sgx_status_t ret = ecall_gemm(EID, data->TA, data->TB,
                                  data->M, data->N, data->K,
                                  data->ALPHA, &data->A, data->lda,
                                  &data->B, data->ldb,
                                  data->BETA, &data->C, data->ldc);
    if (ret != SGX_SUCCESS) {
        print_error_message(ret);
    }
    return NULL;
}

/*
** Computes C = ALPHA*A*B + BETA*C by splitting the M rows of C across one
** enclave worker thread per logical CPU.
**
** Fixes over the original:
**  - each thread gets its own heap-allocated argument struct; previously a
**    single stack struct was overwritten on every loop iteration while
**    already-running threads could still be reading it (data race /
**    dangling pointer);
**  - each slice offsets C by its starting row (and uses lda as the A row
**    stride); previously every thread received the same C pointer and
**    wrote the first rows of C;
**  - the pthread_t array is freed (it used to leak) and allocations are
**    checked.
**
** NOTE(review): the row partitioning assumes TA == 0 (A stored row-major,
** M x K with lda == K) — the same assumption the original made; confirm
** against callers.
*/
void gemm_segmentation(int TA, int TB, int M, int N, int K, float ALPHA,
        float **A, int lda,
        float **B, int ldb,
        float BETA,
        float **C, int ldc)
{
    num_cpu = get_nprocs_conf();
    if (num_cpu < 1) num_cpu = 1;

    pthread_t *threads = calloc(num_cpu, sizeof(pthread_t));
    gemm_thread_data *args = calloc(num_cpu, sizeof(gemm_thread_data));
    if (!threads || !args) error("Memory allocation failed");

    float *a = *A, *b = *B, *c = *C;
    int offset_sum = 0;
    for (int i = 0; i < num_cpu; i++) {
        /* Rows [offset_sum, offset_sum + batch_offset) go to thread i;
        ** the split is as even as integer division allows. */
        int batch_offset = (i + 1) * M / num_cpu - i * M / num_cpu;
        args[i] = (gemm_thread_data){TA, TB, batch_offset, N, K, ALPHA,
                                     a + offset_sum * lda, lda,
                                     b, ldb,
                                     BETA,
                                     c + offset_sum * ldc, ldc};
        offset_sum += batch_offset;
        if (pthread_create(&threads[i], 0, thread_gemm, &args[i]))
            error("Thread creation failed");
    }
    for (int i = 0; i < num_cpu; ++i) {
        pthread_join(threads[i], 0);
    }
    free(args);
    free(threads);
}

/*
** GEMM with a binary (0/1) matrix A: for each entry of A, the matching row
** of B is added to (entry != 0) or subtracted from (entry == 0) the
** corresponding row of C. ALPHA is accepted for signature parity but unused,
** as in the original.
*/
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            char A_PART = A[i*lda + k];
            if (A_PART) {
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += B[k*ldb + j];
                }
            } else {
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] -= B[k*ldb + j];
                }
            }
        }
    }
}

/*
** Allocates a rows x cols matrix filled with uniform random values in [0,1].
** Caller owns the returned buffer.
** Fix: the calloc result is now checked before use.
*/
float *random_matrix(int rows, int cols)
{
    float *m = calloc((size_t)rows * cols, sizeof(float));
    if (!m) error("Memory allocation failed");
    for (int i = 0; i < rows*cols; ++i) {
        m[i] = (float)rand() / RAND_MAX;
    }
    return m;
}

/*
** Benchmarks gemm_cpu(): runs 10 multiplications of random matrices and
** prints the elapsed CPU time.
** Fix: the original printed raw seconds but labeled them "ms"; the value
** is now converted to milliseconds so it matches the label.
*/
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m, k);
    else     a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    float *b;
    if (!TB) b = random_matrix(k, n);
    else     b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;
    float *c = random_matrix(m, n);

    clock_t start = clock(), end;
    for (int i = 0; i < 10; ++i) {
        gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",
           m, k, k, n, TA, TB,
           1000.0 * (double)(end - start) / CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/*
** Thin wrapper: records the CPU count, then forwards all arguments
** unchanged to gemm_cpu().
*/
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    num_cpu = get_nprocs_conf();
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}

/*
** C += ALPHA * A * B for row-major A (M x K, leading dim lda),
** B (K x N, ldb), C (M x N, ldc); neither operand transposed ("nn").
** The i-k-j loop order keeps the inner loop streaming over contiguous
** rows of B and C.
*/
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for num_threads(gemm_omp_threads())
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            float A_PART = ALPHA * A[i*lda + k];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART * B[k*ldb + j];
            }
        }
    }
}

/*
** C += ALPHA * A * B' ("nt"): A is M x K (lda), B is stored N x K (ldb) and
** used transposed, C is M x N (ldc). Each (i, j) entry is a dot product of
** row i of A with row j of B.
*/
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for num_threads(gemm_omp_threads())
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA * A[i*lda + k] * B[j*ldb + k];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/*
** C += ALPHA * A' * B ("tn"): A is stored K x M (lda) and used transposed,
** B is K x N (ldb), C is M x N (ldc).
*/
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for num_threads(gemm_omp_threads())
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            float A_PART = ALPHA * A[k*lda + i];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART * B[k*ldb + j];
            }
        }
    }
}

/*
** C += ALPHA * A' * B' ("tt"): both operands stored transposed
** (A is K x M with lda, B is N x K with ldb), C is M x N (ldc).
*/
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for num_threads(gemm_omp_threads())
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA * A[i + k*lda] * B[k + j*ldb];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/*
** C = ALPHA * op(A) * op(B) + BETA * C, where op() transposes when TA/TB is
** nonzero. Scales C by BETA first, then dispatches to one of the four
** kernels above.
**
** Fix: num_cpu is initialized here if it is still 0 — gemm_cpu() can be
** called directly (time_random_matrix() does so) before gemm() ever set it,
** which previously produced num_threads(0), an invalid OpenMP clause.
*/
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    if (num_cpu <= 0) num_cpu = get_nprocs_conf();

    /* C *= BETA */
    #pragma omp parallel for num_threads(gemm_omp_threads())
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            C[i*ldc + j] *= BETA;
        }
    }
    /* dispatch on the transpose flags */
    if (!TA && !TB)
        gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (TA && !TB)
        gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (!TA && TB)
        gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else
        gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
par_csr_matop_device.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp"

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* Starts the (asynchronous) exchange of external matrix rows on the device:
 * packs the local rows listed in comm_pkg's send map into a big-J CSR send
 * buffer, posts the MPI communication for row lengths / column indices /
 * (optionally) values, and returns a request object (via *request_ptr) to be
 * completed by hypre_ParcsrGetExternalRowsDeviceWait(), which yields the
 * received rows as a device CSR matrix (BigJ column indices). */
HYPRE_Int
hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A,
                                       HYPRE_Int indices_len,
                                       HYPRE_Int *indices,
                                       hypre_ParCSRCommPkg *comm_pkg,
                                       HYPRE_Int want_data,
                                       void **request_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv;
   HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i;
   HYPRE_BigInt *d_send_j, *d_recv_j;
   HYPRE_Int *send_jstarts, *recv_jstarts;
   HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL;
   hypre_ParCSRCommPkg *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs;
   HYPRE_Int my_id;
   void **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE);
   d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE);
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE);

   /* fill the send array with row lengths */
   hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg),
                 HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
   hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
   hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i+1);

   /* send array send_i out: deviceTohost first and MPI (async)
    * note the shift in recv_i by one */
   hypre_TMemcpy(send_i, d_send_i+1, HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* row lengths -> row pointers, on the device */
   hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i);

   /* total number of nnz to send */
   hypre_TMemcpy(&num_nnz_send, d_send_i+num_rows_send, HYPRE_Int, 1,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /* prepare data to send out. overlap with the above commmunication */
   d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE);
   }

   /* lazily mirror the offd column map to the device and cache it on A */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */
   hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              d_send_i, d_send_j, d_send_a);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   send_jstarts[0] = 0;
   for (i = 1; i <= num_sends; i++)
   {
      send_jstarts[i] = send_jstarts[i-1];
      for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i-1);
            j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            j++ )
      {
         send_jstarts[i] += send_i[j];
      }
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs */
   recv_i[0] = 0;
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];

   /* allocate device memory for j and a */
   d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   }

   recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   recv_jstarts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, d_send_j,
                                                   HYPRE_MEMORY_DEVICE, d_recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j,
                                                      HYPRE_MEMORY_DEVICE, d_send_a,
                                                      HYPRE_MEMORY_DEVICE, d_recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv+1,
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

   /* create A_ext: on device */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixI   (A_ext) = d_recv_i;
   hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
   hypre_CSRMatrixData(A_ext) = d_recv_a;
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE;

   /* output: [0] = pending j-communication, [1] = pending a-communication
    * (NULL when !want_data), [2] = the result matrix */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
   hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Completes a request created by hypre_ParcsrGetExternalRowsDeviceInit():
 * waits on the pending communications, frees the send buffers, and returns
 * the received external rows as a device CSR matrix. */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j =
                                           (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];

   /* retrieve the send buffers before the handles are destroyed;
    * comm_handle_a is NULL when values were not requested */
   HYPRE_BigInt  *send_j = comm_handle_j ? (HYPRE_BigInt *)  hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL;
   HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL;

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(send_j, HYPRE_MEMORY_DEVICE);
   hypre_TFree(send_a, HYPRE_MEMORY_DEVICE);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

/* Builds, on the device, a single local CSR matrix B combining the diag and
 * offd parts of A; offd columns are translated to global indices via the
 * (device) offd column map, so B has global column indices stored in BigJ. */
hypre_CSRMatrix*
hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt glbal_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   hypre_CSRMatrix *B;
   HYPRE_Int B_nrows = local_num_rows;
   HYPRE_BigInt B_ncols = glbal_num_cols;
   HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt *B_j;
   HYPRE_Complex *B_a;
   HYPRE_Int B_nnz;
   HYPRE_Int num_procs;

   hypre_MPI_Comm_size(comm, &num_procs);

   /* B_i[0] = 0; row lengths go into B_i[1..], then an inclusive scan
    * turns them into row pointers */
   hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);

   hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i+1);

   hypreDevice_IntegerInclusiveScan(B_nrows+1, B_i);

   /* total number of nnz */
   hypre_TMemcpy(&B_nnz, B_i+B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE);
   B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   /* lazily mirror the offd column map to the device and cache it on A */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              B_i, B_j, B_a);

   /* output */
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI   (B) = B_i;
   hypre_CSRMatrixBigJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   hypre_SyncCudaComputeStream(hypre_handle());

   return B;
}

/* Starts the "reverse" exchange: sends the rows of B_ext (external rows held
 * locally, big-J CSR on device) back along comm_pkg_A in the transpose
 * direction, producing B_int (rows destined for this rank). Returns a request
 * for hypre_ExchangeExternalRowsDeviceWait(). */
HYPRE_Int
hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext,
                                      hypre_ParCSRCommPkg *comm_pkg_A,
                                      void **request_ptr)
{
   MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];

   HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext);
   HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext);
   HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext);
   HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext);
   HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext);
   HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int B_int_nrows = num_elmts_send;
   HYPRE_Int B_int_ncols = B_ext_ncols;
   HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt *B_int_j_d = NULL;
   HYPRE_Complex *B_int_a_d = NULL;
   HYPRE_Int B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i;
   HYPRE_Int num_procs, my_id;
   void **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);

   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;

   /* B_ext row lengths -> row pointers (host), overlapped with the MPI above */
   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i-1];
   }

   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);

   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]];
   }

   /* communication package with send/recv roles swapped w.r.t. comm_pkg_A */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* finish the row-nnz exchange: B_int_i_h[1..] now holds row lengths */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i_h[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i_h[i] += B_int_i_h[i-1];
   }
   B_int_nnz = B_int_i_h[B_int_nrows];

   B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE);
   B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE);

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows */
   comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j,
                                                    HYPRE_MEMORY_DEVICE, B_ext_a_d,
                                                    HYPRE_MEMORY_DEVICE, B_int_a_d );
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, B_ext_j_d,
                                                   HYPRE_MEMORY_DEVICE, B_int_j_d );

   hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows+1,
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

   /* create CSR: on device */
   B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixI(B_int_d) = B_int_i_d;
   hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d;
   hypre_CSRMatrixData(B_int_d) = B_int_a_d;
   hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE;

   /* output: [0] = pending j-communication, [1] = pending a-communication,
    * [2] = the result matrix */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int_d;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Completes a request created by hypre_ExchangeExternalRowsDeviceInit() and
 * returns the received rows as a device CSR matrix. */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int_d;
}

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

/* Starts extraction of the rows of B that correspond to A's offd columns
 * (device version of ParCSRMatrixExtractBExt); complete with
 * hypre_ParCSRMatrixExtractBExtDeviceWait(). */
HYPRE_Int
hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B,
                                         hypre_ParCSRMatrix *A,
                                         HYPRE_Int want_data,
                                         void **request_ptr)
{
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   /*
   hypre_assert( hypre_GetActualMemLocation(
            hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) ==
   HYPRE_MEMORY_DEVICE );
   */

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);
   return hypre_error_flag;
}

/* Completes a request from hypre_ParCSRMatrixExtractBExtDeviceInit(). */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   return hypre_ParcsrGetExternalRowsDeviceWait(request);
}

/* Blocking convenience wrapper: Init + Wait in one call. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);
   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}

/* return B = [Adiag, Aoffd] */
#if 1

/* One warp per row: copies row `row` of diag, then of offd (with offd
 * columns optionally remapped through cols_offd_map and shifted past the
 * diag columns), into the concatenated output CSR (d_ib, d_jb, d_ab). */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int  nrows,    HYPRE_Int  diag_ncol,
                                  HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1,1>();

   if (row >= nrows)
   {
      return;
   }

   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;

   /* diag part */
   /* lanes 0/1 load the row's begin/end pointers; lane 0 loads the output
    * row start; values are then broadcast to the whole warp via shuffles */
   if (lane_id < 2)
   {
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p+i] = read_only_load(d_diag_j + i);
      d_ab[p+i] = read_only_load(d_diag_a + i);
   }

   /* offd part */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   bstart += iend - istart;
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      d_jb[p+i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p+i] = read_only_load(d_offd_a + i);
   }
}

/* Returns B = [Adiag, Aoffd] as one device CSR matrix: offd columns are
 * appended after the diag columns (local indices, no global remap). */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* row lengths, then exclusive scan -> row pointers of B */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );

   const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   return B;
}
#else
/* Thrust-based alternative (disabled): builds COO triplets for diag and
 * offd, then sorts by row and compresses back to CSR. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   hypre_CSRMatrix *B;
   HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz;
   HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* offd columns are shifted past the diag columns */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B) = B_i;
   hypre_CSRMatrixJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   return B;
}
#endif

/* return B = [Adiag, Aoffd; E] */
#if 1
/* Stacks E (external rows, big-J CSR) below [Adiag, Aoffd]: E is split into
 * its diag/offd parts relative to A's column partition, the offd column maps
 * are merged, and both pieces are written into one device CSR matrix B.
 * Also returns the merged offd column count and map. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *E_diag, *E_offd, *B;
   HYPRE_Int *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;

   hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A),
                              hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A),
                              &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd);

   B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E),
                             hypre_ParCSRMatrixNumCols(A) + num_cols_offd,
                             hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E));
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* row pointers for the A part of B */
   hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));
   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) );

   dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim);

   /* A's offd columns are remapped through cols_offd_map into the merged map */
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd),
                      cols_offd_map,
                      hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);

   /* row pointers for the E part of B: copy E's row lengths and shift them
    * past the nnz of the A part */
   hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1,
                 HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( transform,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd)),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      thrust::plus<HYPRE_Int>() );

   gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim);

   hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag));

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag),
                      hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag),
                      hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd),
                      NULL,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   hypre_CSRMatrixDestroy(E_diag);
   hypre_CSRMatrixDestroy(E_offd);

   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;

   return hypre_error_flag;
}
#else
/* Thrust/COO-based alternative (disabled). NOTE(review): this variant is cut
 * off at the end of the visible chunk; it is reproduced as-is. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);
   HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   HYPRE_Int *E_i = hypre_CSRMatrixI(E);
   HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E);
   HYPRE_Complex *E_a = hypre_CSRMatrixData(E);
   HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E);
   HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E);
   HYPRE_Int E_diag_nnz, E_offd_nnz;

   hypre_CSRMatrix *B;
   HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz;
   HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // E
   hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A,
                                   last_col_A, num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz,
                                   NULL, NULL, NULL, NULL, &E_offd_nnz, NULL, NULL, NULL, NULL);
   HYPRE_Int *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;
   HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i);

   hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL, first_col_A,
                                   last_col_A, num_cols_offd_A, col_map_offd_A, &cols_offd_map,
                                   &num_cols_offd, &cols_map_offd, &E_diag_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz, B_j + A_diag_nnz + A_offd_nnz,
                                   B_a + A_diag_nnz + A_offd_nnz, NULL, &E_offd_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz, NULL);
   hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE);

   /* E's rows sit below A's rows in B */
   HYPRE_THRUST_CALL( transform,
                      B_ii + A_diag_nnz + A_offd_nnz,
                      B_ii + B_nnz,
                      thrust::make_constant_iterator(A_nrows),
                      B_ii + A_diag_nnz + A_offd_nnz,
                      thrust::plus<HYPRE_Int>() );

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( gather, A_offd_j, A_offd_j + A_offd_nnz, cols_offd_map, B_j + A_diag_nnz); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int nrows, local_row; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; nrows = row_end - row_start; if (row < row_start || row >= row_end) { return(-1); } local_row = row - row_start; /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */ if ( 
!hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) ) { HYPRE_Int max_row_nnz; HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz); hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>()); /* HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows); hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE ); */ hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); } else { HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d); hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(size_d, HYPRE_MEMORY_DEVICE); } if (col_ind || values) { if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL) { hypre_ParCSRMatrixDeviceColMapOffd(mat) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE); hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_ParCSRMatrixColMapOffd(mat), HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); } hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL, hypre_ParCSRMatrixFirstColDiag(mat), hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixJ(Aa), hypre_CSRMatrixData(Aa), hypre_CSRMatrixI(Ba) + local_row, hypre_CSRMatrixJ(Ba), hypre_CSRMatrixData(Ba), NULL, hypre_ParCSRMatrixRowindices(mat), hypre_ParCSRMatrixRowvalues(mat) 
); } if (col_ind) { *col_ind = hypre_ParCSRMatrixRowindices(mat); } if (values) { *values = hypre_ParCSRMatrixRowvalues(mat); } hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } /* abs == 1, use absolute values * option == 0, drop all the entries that are smaller than tol * TODO more options */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A, HYPRE_Complex tol, HYPRE_Int abs, HYPRE_Int option) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); if (col_map_offd_A == NULL) { col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A; } hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, abs, option); hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, abs, option); hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); /* squeeze out zero columns of A_offd */ HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new; tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); tmp_end = HYPRE_THRUST_CALL( unique, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); num_cols_A_offd_new = tmp_end - tmp_j; hypre_assert(num_cols_A_offd_new <= num_cols_A_offd); if (num_cols_A_offd_new < num_cols_A_offd) { hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new; HYPRE_Int *offd_mark = 
hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( scatter, thrust::counting_iterator<HYPRE_Int>(0), thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new), tmp_j, offd_mark ); HYPRE_THRUST_CALL( gather, hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd), offd_mark, hypre_CSRMatrixJ(A_offd) ); HYPRE_THRUST_CALL( gather, tmp_j, tmp_j + num_cols_A_offd_new, col_map_offd_A, col_map_offd_A_new ); hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE); hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE); hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new; hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE); return hypre_error_flag; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /*-------------------------------------------------------------------------- * HYPRE_ParCSRDiagScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA, HYPRE_ParVector Hy, HYPRE_ParVector Hx ) { hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA; hypre_ParVector *y = (hypre_ParVector *) Hy; hypre_ParVector *x = (hypre_ParVector *) Hx; HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y)); HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x)); HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || 
defined(HYPRE_USING_HIP) hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data); //hypre_SyncCudaComputeStream(hypre_handle()); #else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_size; i++) { x_data[i] = y_data[i] / A_data[A_i[i]]; } #endif /* #if defined(HYPRE_USING_CUDA) */ return ierr; }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations $reset_tile_sizes // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for 
(k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif double total = 0.0; for (i = 0; i < Nz; ++i) { for (j = 0; j < Ny; ++j) { for (k = 0; k < Nx; ++k) { total += A[Nt%2][i][j][k]; } } } printf("Sum(final): %e\n", total); // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
convolution_sgemm_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// int8 GEMM for im2col'd data, input packed 8 channels/element, output packed
// 4 channels/element. Accumulation is done in 32-bit via 16-bit mullo/mulhi
// pairs (SSE2-only; see the TODOs about _mm_cvtepi8_epi16 on SSE4.1).
static void im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output positions
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;   // input channels / 8

    const int outch = top_blob.c;       // output channels / 4

    // permute: interleave pairs of output positions so the GEMM inner loop
    // reads contiguous memory (tile width 2, remainder handled per-element)
    Mat tmp;
    if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size >> 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            int64_t* tmpptr = tmp.channel(i / 2);

            for (int q = 0; q < inch; q++)
            {
                // each int64_t holds one 8-channel packed pixel
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // copy two adjacent packed pixels (16 bytes) at once
                    __m128i _v = _mm_loadu_si128((const __m128i*)img0);
                    _mm_storeu_si128((__m128i*)tmpptr, _v);
                    tmpptr += 2;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            int64_t* tmpptr = tmp.channel(i / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr += 1;
                    img0 += size;
                }
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr0 = top_blob.channel(p);

        int i = 0;
        // main tile: 2 output positions x 4 output channels per iteration
        for (; i + 1 < size; i += 2)
        {
            const signed char* tmpptr = tmp.channel(i / 2);
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // _sumXY: X = output position (0/1), Y = output channel (0..3);
            // each holds 4 partial 32-bit sums that are reduced after the loop
            __m128i _sum00 = _mm_setzero_si128();
            __m128i _sum01 = _mm_setzero_si128();
            __m128i _sum02 = _mm_setzero_si128();
            __m128i _sum03 = _mm_setzero_si128();
            __m128i _sum10 = _mm_setzero_si128();
            __m128i _sum11 = _mm_setzero_si128();
            __m128i _sum12 = _mm_setzero_si128();
            __m128i _sum13 = _mm_setzero_si128();

            int j = 0;
            for (; j < nn; j++)
            {
                // TODO use _mm_cvtepi8_epi16 on sse4.1
                // sign-extend 16 int8 inputs (2 pixels x 8 channels) to int16
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);

                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
                __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);

                // TODO use _mm_cvtepi8_epi16 on sse4.1
                // sign-extend 32 int8 weights (4 out channels x 8 in channels)
                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));

                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);

                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

                // int16 x int16 -> int32 via mullo/mulhi + unpack pairs
                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
                __m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
                __m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
                __m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
                __m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
                __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
                __m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
                __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
                __m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
                __m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
                __m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
                __m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
                _sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));

                tmpptr += 16;
                kptr0 += 32;
            }

            // transpose 4x4
            // so that each register holds one lane of all four channel sums,
            // then reduce across registers
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
                _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
                _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
                _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
                _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
                _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
                _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
                _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
                _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00 = _mm_add_epi32(_sum00, _sum01);
            _sum02 = _mm_add_epi32(_sum02, _sum03);
            _sum10 = _mm_add_epi32(_sum10, _sum11);
            _sum12 = _mm_add_epi32(_sum12, _sum13);

            _sum00 = _mm_add_epi32(_sum00, _sum02);
            _sum10 = _mm_add_epi32(_sum10, _sum12);

            // 4 int32 results per output position (pack4 layout)
            _mm_storeu_si128((__m128i*)outptr0, _sum00);
            _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
            outptr0 += 8;
        }
        // remainder: 1 output position x 4 output channels
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();
            __m128i _sum2 = _mm_setzero_si128();
            __m128i _sum3 = _mm_setzero_si128();

            int j = 0;
            for (; j < nn; j++)
            {
                // TODO use _mm_cvtepi8_epi16 on sse4.1
                // sign-extend 8 int8 inputs (1 pixel x 8 channels) to int16
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
                _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));

                // TODO use _mm_cvtepi8_epi16 on sse4.1
                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));

                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);

                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

                __m128i _sl0 = _mm_mullo_epi16(_val, _w0);
                __m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
                __m128i _sl1 = _mm_mullo_epi16(_val, _w1);
                __m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
                __m128i _sl2 = _mm_mullo_epi16(_val, _w2);
                __m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
                __m128i _sl3 = _mm_mullo_epi16(_val, _w3);
                __m128i _sh3 = _mm_mulhi_epi16(_val, _w3);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
                _sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));

                tmpptr += 8;
                kptr0 += 32;
            }

            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
                _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
                _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
                _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
                _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum0 = _mm_add_epi32(_sum0, _sum1);
            _sum2 = _mm_add_epi32(_sum2, _sum3);

            _sum0 = _mm_add_epi32(_sum0, _sum2);

            _mm_storeu_si128((__m128i*)outptr0, _sum0);
            outptr0 += 4;
        }
    }
}

// Reorder the raw weights into the layout the GEMM above consumes.
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(32 * maxk, inch / 8, outch / 4, 1u);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                // 4 output channels x 8 input channels per 32-byte group
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Full convolution: im2col the packed-int8 input, then run the GEMM above.
static void convolution_im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // bytes to skip from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // one packed (8-channel) pixel per output position for
                    // this kernel tap
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
pooling.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_POOLING_H_ #define MACE_KERNELS_POOLING_H_ #include <algorithm> #include <limits> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/conv_pool_2d_util.h" #if defined(MACE_ENABLE_NEON) #include <arm_neon.h> #endif #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { enum PoolingType { AVG = 1, // avg_pool MAX = 2, // max_pool }; namespace kernels { struct PoolingFunctorBase { PoolingFunctorBase(const PoolingType pooling_type, const int *kernels, const int *strides, const Padding padding_type, const std::vector<int> &paddings, const int *dilations) : pooling_type_(pooling_type), kernels_(kernels), strides_(strides), padding_type_(padding_type), paddings_(paddings), dilations_(dilations) {} const PoolingType pooling_type_; const int *kernels_; const int *strides_; const Padding padding_type_; std::vector<int> paddings_; const int *dilations_; }; template <DeviceType D, typename T> struct PoolingFunctor; template <> struct PoolingFunctor<DeviceType::CPU, float>: PoolingFunctorBase { PoolingFunctor(const PoolingType pooling_type, const int *kernels, const int *strides, const Padding padding_type, const std::vector<int> &paddings, const int *dilations) : PoolingFunctorBase( pooling_type, kernels, strides, padding_type, 
paddings, dilations) { } void MaxPooling(const float *input, const index_t *in_shape, const index_t *out_shape, const int *filter_hw, const int *stride_hw, const int *dilation_hw, const int *pad_hw, float *output) { const index_t in_image_size = in_shape[2] * in_shape[3]; const index_t out_image_size = out_shape[2] * out_shape[3]; const index_t in_batch_size = in_shape[1] * in_image_size; const index_t out_batch_size = out_shape[1] * out_image_size; #pragma omp parallel for collapse(2) for (index_t b = 0; b < out_shape[0]; ++b) { for (index_t c = 0; c < out_shape[1]; ++c) { const index_t out_base = b * out_batch_size + c * out_image_size; const index_t in_base = b * in_batch_size + c * in_image_size; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; for (index_t h = 0; h < out_height; ++h) { for (index_t w = 0; w < out_width; ++w) { const index_t out_offset = out_base + h * out_width + w; float res = std::numeric_limits<float>::lowest(); for (int fh = 0; fh < filter_hw[0]; ++fh) { for (int fw = 0; fw < filter_hw[1]; ++fw) { index_t inh = h * stride_hw[0] + dilation_hw[0] * fh - pad_hw[0]; index_t inw = w * stride_hw[1] + dilation_hw[1] * fw - pad_hw[1]; if (inh >= 0 && inh < in_height && inw >= 0 && inw < in_width) { index_t input_offset = in_base + inh * in_width + inw; res = std::max(res, input[input_offset]); } } } output[out_offset] = res; } } } } } void AvgPooling(const float *input, const index_t *in_shape, const index_t *out_shape, const int *filter_hw, const int *stride_hw, const int *dilation_hw, const int *pad_hw, float *output) { const index_t in_image_size = in_shape[2] * in_shape[3]; const index_t out_image_size = out_shape[2] * out_shape[3]; const index_t in_batch_size = in_shape[1] * in_image_size; const index_t out_batch_size = out_shape[1] * out_image_size; #pragma omp parallel for collapse(2) for (index_t b = 0; b < out_shape[0]; ++b) { for 
(index_t c = 0; c < out_shape[1]; ++c) { const index_t out_base = b * out_batch_size + c * out_image_size; const index_t in_base = b * in_batch_size + c * in_image_size; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; for (index_t h = 0; h < out_height; ++h) { for (index_t w = 0; w < out_width; ++w) { const index_t out_offset = out_base + h * out_width + w; float res = 0; int block_size = 0; for (int fh = 0; fh < filter_hw[0]; ++fh) { for (int fw = 0; fw < filter_hw[1]; ++fw) { index_t inh = h * stride_hw[0] + dilation_hw[0] * fh - pad_hw[0]; index_t inw = w * stride_hw[1] + dilation_hw[1] * fw - pad_hw[1]; if (inh >= 0 && inh < in_height && inw >= 0 && inw < in_width) { index_t input_offset = in_base + inh * in_width + inw; res += input[input_offset]; ++block_size; } } } output[out_offset] = res / block_size; } } } } } MaceStatus operator()(const Tensor *input_tensor, // NCHW Tensor *output_tensor, // NCHW StatsFuture *future) { MACE_UNUSED(future); std::vector<index_t> output_shape(4); std::vector<index_t> filter_shape = { input_tensor->dim(1), input_tensor->dim(1), kernels_[0], kernels_[1]}; std::vector<int> paddings(2); if (paddings_.empty()) { kernels::CalcNCHWPaddingAndOutputSize( input_tensor->shape().data(), filter_shape.data(), dilations_, strides_, padding_type_, output_shape.data(), paddings.data()); } else { paddings = paddings_; CalcNCHWOutputSize(input_tensor->shape().data(), filter_shape.data(), paddings_.data(), dilations_, strides_, RoundType::CEIL, output_shape.data()); } MACE_RETURN_IF_ERROR(output_tensor->Resize(output_shape)); Tensor::MappingGuard input_guard(input_tensor); Tensor::MappingGuard output_guard(output_tensor); const float *input = input_tensor->data<float>(); float *output = output_tensor->mutable_data<float>(); const index_t *input_shape = input_tensor->shape().data(); int pad_hw[2] = {paddings[0] / 2, paddings[1] / 2}; if 
(pooling_type_ == PoolingType::MAX) { MaxPooling(input, input_shape, output_shape.data(), kernels_, strides_, dilations_, pad_hw, output); } else if (pooling_type_ == PoolingType::AVG) { AvgPooling(input, input_shape, output_shape.data(), kernels_, strides_, dilations_, pad_hw, output); } else { MACE_NOT_IMPLEMENTED; } return MACE_SUCCESS; } }; template <> struct PoolingFunctor<DeviceType::CPU, uint8_t>: PoolingFunctorBase { PoolingFunctor(const PoolingType pooling_type, const int *kernels, const int *strides, const Padding padding_type, const std::vector<int> &paddings, const int *dilations) : PoolingFunctorBase( pooling_type, kernels, strides, padding_type, paddings, dilations) { } void MaxPooling(const uint8_t *input, const index_t *in_shape, const index_t *out_shape, const int *filter_hw, const int *stride_hw, const int *pad_hw, uint8_t *output) { #pragma omp parallel for collapse(3) for (index_t b = 0; b < out_shape[0]; ++b) { for (index_t h = 0; h < out_shape[1]; ++h) { for (index_t w = 0; w < out_shape[2]; ++w) { const index_t out_height = out_shape[1]; const index_t out_width = out_shape[2]; const index_t channels = out_shape[3]; const index_t in_height = in_shape[1]; const index_t in_width = in_shape[2]; const index_t in_h_base = h * stride_hw[0] - pad_hw[0]; const index_t in_w_base = w * stride_hw[1] - pad_hw[1]; const index_t in_h_begin = std::max<index_t>(0, in_h_base); const index_t in_w_begin = std::max<index_t>(0, in_w_base); const index_t in_h_end = std::min(in_height, in_h_base + filter_hw[0]); const index_t in_w_end = std::min(in_width, in_w_base + filter_hw[1]); uint8_t *out_ptr = output + ((b * out_height + h) * out_width + w) * channels; for (index_t ih = in_h_begin; ih < in_h_end; ++ih) { for (index_t iw = in_w_begin; iw < in_w_end; ++iw) { const uint8_t *in_ptr = input + ((b * in_height + ih) * in_width + iw) * channels; index_t c = 0; #if defined(MACE_ENABLE_NEON) for (; c <= channels - 16; c += 16) { uint8x16_t out_vec = vld1q_u8(out_ptr + 
c); uint8x16_t in_vec = vld1q_u8(in_ptr + c); out_vec = vmaxq_u8(out_vec, in_vec); vst1q_u8(out_ptr + c, out_vec); } for (; c <= channels - 8; c += 8) { uint8x8_t out_vec = vld1_u8(out_ptr + c); uint8x8_t in_vec = vld1_u8(in_ptr + c); out_vec = vmax_u8(out_vec, in_vec); vst1_u8(out_ptr + c, out_vec); } #endif for (; c < channels; ++c) { out_ptr[c] = std::max(out_ptr[c], in_ptr[c]); } } } } } } } void AvgPooling(const uint8_t *input, const index_t *in_shape, const index_t *out_shape, const int *filter_hw, const int *stride_hw, const int *pad_hw, uint8_t *output) { #pragma omp parallel for collapse(3) for (index_t b = 0; b < out_shape[0]; ++b) { for (index_t h = 0; h < out_shape[1]; ++h) { for (index_t w = 0; w < out_shape[2]; ++w) { const index_t out_height = out_shape[1]; const index_t out_width = out_shape[2]; const index_t channels = out_shape[3]; const index_t in_height = in_shape[1]; const index_t in_width = in_shape[2]; const index_t in_h_base = h * stride_hw[0] - pad_hw[0]; const index_t in_w_base = w * stride_hw[1] - pad_hw[1]; const index_t in_h_begin = std::max<index_t>(0, in_h_base); const index_t in_w_begin = std::max<index_t>(0, in_w_base); const index_t in_h_end = std::min(in_height, in_h_base + filter_hw[0]); const index_t in_w_end = std::min(in_width, in_w_base + filter_hw[1]); const index_t block_size = (in_h_end - in_h_begin) * (in_w_end - in_w_begin); MACE_CHECK(block_size > 0); std::vector<uint16_t> average_buffer(channels); uint16_t *avg_buffer = average_buffer.data(); std::fill_n(avg_buffer, channels, 0); for (index_t ih = in_h_begin; ih < in_h_end; ++ih) { for (index_t iw = in_w_begin; iw < in_w_end; ++iw) { const uint8_t *in_ptr = input + ((b * in_height + ih) * in_width + iw) * channels; index_t c = 0; #if defined(MACE_ENABLE_NEON) for (; c <= channels - 16; c += 16) { uint16x8_t avg_vec[2]; avg_vec[0] = vld1q_u16(avg_buffer + c); avg_vec[1] = vld1q_u16(avg_buffer + c + 8); uint8x16_t in_vec = vld1q_u8(in_ptr + c); avg_vec[0] = 
vaddw_u8(avg_vec[0], vget_low_u8(in_vec)); avg_vec[1] = vaddw_u8(avg_vec[1], vget_high_u8(in_vec)); vst1q_u16(avg_buffer + c, avg_vec[0]); vst1q_u16(avg_buffer + c + 8, avg_vec[1]); } for (; c <= channels - 8; c += 8) { uint16x8_t avg_vec = vld1q_u16(avg_buffer + c); uint8x8_t in_vec = vld1_u8(in_ptr + c); avg_vec = vaddw_u8(avg_vec, in_vec); vst1q_u16(avg_buffer + c, avg_vec); } #endif for (; c < channels; ++c) { avg_buffer[c] += in_ptr[c]; } } } uint8_t *out_ptr = output + ((b * out_height + h) * out_width + w) * channels; for (index_t c = 0; c < channels; ++c) { out_ptr[c] = static_cast<uint8_t>( (avg_buffer[c] + block_size / 2) / block_size); } } } } } MaceStatus operator()(const Tensor *input_tensor, // NHWC Tensor *output_tensor, // NHWC StatsFuture *future) { MACE_UNUSED(future); MACE_CHECK(dilations_[0] == 1 && dilations_[1] == 1, "Quantized pooling does not support dilation > 1 yet."); // Use the same scale and zero point with input and output. output_tensor->SetScale(input_tensor->scale()); output_tensor->SetZeroPoint(input_tensor->zero_point()); std::vector<index_t> output_shape(4); std::vector<index_t> filter_shape = { input_tensor->dim(3), kernels_[0], kernels_[1], input_tensor->dim(3)}; std::vector<int> paddings(2); if (paddings_.empty()) { CalcPaddingAndOutputSize(input_tensor->shape().data(), NHWC, filter_shape.data(), OHWI, dilations_, strides_, padding_type_, output_shape.data(), paddings.data()); } else { paddings = paddings_; CalcOutputSize(input_tensor->shape().data(), NHWC, filter_shape.data(), OHWI, paddings_.data(), dilations_, strides_, RoundType::CEIL, output_shape.data()); } MACE_RETURN_IF_ERROR(output_tensor->Resize(output_shape)); const index_t out_channels = output_tensor->dim(3); const index_t in_channels = input_tensor->dim(3); MACE_CHECK(out_channels == in_channels); Tensor::MappingGuard input_guard(input_tensor); Tensor::MappingGuard output_guard(output_tensor); const uint8_t *input = input_tensor->data<uint8_t>(); uint8_t *output 
= output_tensor->mutable_data<uint8_t>(); int pad_hw[2] = {paddings[0] / 2, paddings[1] / 2}; if (pooling_type_ == PoolingType::MAX) { MaxPooling(input, input_tensor->shape().data(), output_shape.data(), kernels_, strides_, pad_hw, output); } else if (pooling_type_ == PoolingType::AVG) { AvgPooling(input, input_tensor->shape().data(), output_shape.data(), kernels_, strides_, pad_hw, output); } else { MACE_NOT_IMPLEMENTED; } return MACE_SUCCESS; } }; #ifdef MACE_ENABLE_OPENCL template <typename T> struct PoolingFunctor<DeviceType::GPU, T> : PoolingFunctorBase { PoolingFunctor(const PoolingType pooling_type, const int *kernels, const int *strides, const Padding padding_type, const std::vector<int> &paddings, const int *dilations) : PoolingFunctorBase( pooling_type, kernels, strides, padding_type, paddings, dilations) { } MaceStatus operator()(const Tensor *input_tensor, Tensor *output_tensor, StatsFuture *future); cl::Kernel kernel_; uint32_t kwg_size_; std::unique_ptr<BufferBase> kernel_error_; std::vector<index_t> input_shape_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_POOLING_H_
GB_unaryop__lnot_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_uint32_uint32
// op(A') function: GB_tran__lnot_uint32_uint32

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = !(aij != 0)

// The GB_* macros below parameterize the shared kernel templates
// (GB_unaryop_transpose.c) for this particular type/operator pair.

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the truth value of x
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (identity cast here: both types are uint32_t)
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_uint32
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // every entry is independent, so a static schedule parallelizes trivially;
    // aliasing Cx == Ax is safe because entry p only reads/writes position p
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template, specialized
    // by the GB_* macros defined above; phase 2 writes the output entries
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pmv-OpenMP-reduction.c
/*
 * pmv-OpenMp-b.c
 *
 * Created on: 12/04/2014
 * Author: Carlos de la Torre
 *
 * Parallel matrix-by-vector product using an OpenMP sum reduction.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
/* Matrices/vectors of at most this size are printed in full. */
#define PRINT_ALL_MIN 15

int main(int argc, char* argv[]) {
    int i, j, N, TIME;
    double tr, acumulador = 0;
    double t1, t2;                 /* wall-clock stamps around the product */

    /* Validate the command line: N (matrix/vector size) and an optional
     * flag TIME (1 = print the elapsed time only, 0 = full output). */
    switch (argc) {
    case 1:
        printf("Faltan las filas/columnas de la Matriz, y el tamaño del vector\n");
        printf("\nUso: %s [numero] [0/1]\n", argv[0]);
        printf("\nDonde numero es el tamaño de las filas y las columnas de la matriz y el tamaño del vector\n");
        printf("y el 0 o el 1 especifica si queremos solo los tiempos (1) o no\n");
        exit(-1);
        break;
    case 2:
        N = atoi(argv[1]);
        TIME = 0;
        break;
    case 3:
        N = atoi(argv[1]);
        TIME = atoi(argv[2]);
        break;
    default:
        printf("La cantidad de parametros es incorrecta\n");
        exit(-1);
        break;
    }

    int *vector, *Vresultado;
    int **Matriz;

    /* Allocate everything, checking each malloc BEFORE dereferencing it.
     * (The original code filled Matriz[i] before checking Matriz != NULL,
     * which dereferences a null pointer on allocation failure.) */
    Matriz = (int**) malloc(N * sizeof(int*));
    vector = (int*) malloc(N * sizeof(int));
    Vresultado = (int*) malloc(N * sizeof(int));
    if ((Matriz == NULL) || (vector == NULL) || (Vresultado == NULL)) {
        printf("Error en la reserva de espacio para los Vectores o Matriz\n");
        exit(-2);
    }
    for (i = 0; i < N; i++) {
        Matriz[i] = (int*) malloc(N * sizeof(int));
        if (Matriz[i] == NULL) {
            printf("Error en la reserva de espacio para los Vectores o Matriz\n");
            exit(-2);
        }
    }

    srand(time(NULL)); /* seed kept from the original code (rand() is unused) */

    /* Initialize the matrix and the vector in parallel. */
    #pragma omp parallel for private(i,j)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            Matriz[i][j] = 2;
        }
        vector[i] = 4;
    }

    /* Print matrix and vector when N <= PRINT_ALL_MIN and full output wanted. */
    if (N <= PRINT_ALL_MIN && TIME != 1) {
        printf("\nEsta es la matriz: \n");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("%d ", Matriz[i][j]);
            }
            printf("\n");
        }
        printf("\nEste es el vector: \n");
        for (i = 0; i < N; i++)
            printf("%d ", vector[i]);
        printf("\n\n");
    }

    t1 = omp_get_wtime();
    /* Matrix-vector product: for each row, all threads cooperate on the
     * inner dot product through a sum reduction. */
    #pragma omp parallel private(i,j)
    {
        for (i = 0; i < N; i++) {
            /* Reset the shared accumulator exactly once per row.  Doing it
             * inside 'single' (implicit barrier) avoids the racy concurrent
             * writes the original performed from every thread. */
            #pragma omp single
            acumulador = 0;
            #pragma omp for reduction(+:acumulador)
            for (j = 0; j < N; j++) {
                /* BUGFIX: accumulate (+=) instead of overwrite (=); the
                 * original kept only each thread's last product, so the
                 * reduction did not yield the row's dot product. */
                acumulador += Matriz[i][j] * vector[j];
            }
            /* The reduction is combined at the barrier ending 'omp for',
             * so one thread can now safely publish the row result. */
            #pragma omp single
            Vresultado[i] = acumulador;
        }
    }
    t2 = omp_get_wtime();
    tr = t2 - t1; /* elapsed seconds of the multiplication */

    /* Report according to the requested verbosity.
     * BUGFIX: N is an int, so it is printed with %d (the original used %u,
     * a format/argument mismatch, i.e. undefined behavior per C11). */
    if (N <= PRINT_ALL_MIN) {
        printf("Tiempo(seg.):%11.9f\nTamaño Matriz y Vector:%d\n", tr, N);
        printf("Este es el vector resultante: \n");
        printf("{");
        for (i = 0; i < N; i++) {
            printf("VR[%d]=%d, ", i, Vresultado[i]);
        }
        printf("}\n");
    } else if (TIME == 1) {      /* timing only */
        printf("%11.9f\n", tr);
    } else {                     /* timing plus first/last sanity values */
        printf("Tiempo(seg.):%11.9f\n", tr);
        printf("Tamaño Matriz y Vector:%d\n", N);
        printf("(Matriz[0][0]=%d)*%d=%d\n", Matriz[0][0], vector[0], Matriz[0][0]*vector[0]);
        printf("(Matriz[%d][%d]=%d)*%d=%d\n", N-1, N-1, Matriz[N-1][N-1], vector[N-1], Matriz[N-1][N-1]*vector[N-1]);
        printf("VectorResultado[0]=%d\n", Vresultado[0]);
        printf("VectorResultado[%d]=%d\n", N-1, Vresultado[N-1]);
    }

    free(vector);
    free(Vresultado);
    for (i = 0; i < N; i++)
        free(Matriz[i]);
    free(Matriz);
    return 0;
}
cmontecarlo.c
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef WITHOPENMP
#include <omp.h>
#endif
#include "io.h"
#include "abbrev.h"
#include "status.h"
#include "rpacket.h"
#include "cmontecarlo.h"

/** Look for a place to insert a value in an inversely sorted float array.
 *
 * @param x an inversely (largest to lowest) sorted float array
 * @param x_insert a value to insert
 * @param imin lower bound
 * @param imax upper bound
 * @param result [out] index of the next boundary to the left
 *
 * @return TARDIS_ERROR_OK, or TARDIS_ERROR_BOUNDS_ERROR when x_insert
 *         lies outside [x[imax], x[imin]] (the array is descending)
 */
tardis_error_t
reverse_binary_search (const double *x, double x_insert,
                       int64_t imin, int64_t imax, int64_t * result)
{
  /*
    Have in mind that *x points to a reverse sorted array.
    That is large values will have small indices and small ones
    will have large indices.
  */
  tardis_error_t ret_val = TARDIS_ERROR_OK;
  if (x_insert > x[imin] || x_insert < x[imax])
    {
      /* out of the array's value range: leave *result untouched */
      ret_val = TARDIS_ERROR_BOUNDS_ERROR;
    }
  else
    {
      /* NOTE(review): imid is a plain int while the bounds are int64_t;
         fine for realistic array lengths but truncates past INT_MAX —
         confirm upstream intent. */
      int imid = (imin + imax) >> 1;
      /* shrink [imin, imax] until at most two intervals remain */
      while (imax - imin > 2)
        {
          if (x[imid] < x_insert)
            {
              imax = imid + 1;
            }
          else
            {
              imin = imid;
            }
          imid = (imin + imax) >> 1;
        }
      /* pick the interval whose left boundary is >= x_insert */
      if (imax - imin == 2 && x_insert < x[imin + 1])
        {
          *result = imin + 1;
        }
      else
        {
          *result = imin;
        }
    }
  return ret_val;
}

/** Insert a value in to an array of line frequencies
 *
 * @param nu array of line frequencies
 * @param nu_insert value of nu key
 * @param number_of_lines number of lines in the line list
 *
 * @return index of the next line to the red. If the key value is redder than the reddest line returns number_of_lines.
*/ tardis_error_t line_search (const double *nu, double nu_insert, int64_t number_of_lines, int64_t * result) { tardis_error_t ret_val = TARDIS_ERROR_OK; int64_t imin = 0; int64_t imax = number_of_lines - 1; if (nu_insert > nu[imin]) { *result = imin; } else if (nu_insert < nu[imax]) { *result = imax + 1; } else { ret_val = reverse_binary_search (nu, nu_insert, imin, imax, result); *result = *result + 1; } return ret_val; } tardis_error_t binary_search (const double *x, double x_insert, int64_t imin, int64_t imax, int64_t * result) { /* Have in mind that *x points to a sorted array. Like [1,2,3,4,5,...] */ int imid; tardis_error_t ret_val = TARDIS_ERROR_OK; if (x_insert < x[imin] || x_insert > x[imax]) { ret_val = TARDIS_ERROR_BOUNDS_ERROR; } else { while (imax >= imin) { imid = (imin + imax) / 2; if (x[imid] == x_insert) { *result = imid; break; } else if (x[imid] < x_insert) { imin = imid + 1; } else { imax = imid - 1; } } if (imax - imid == 2 && x_insert < x[imin + 1]) { *result = imin; } else { *result = imin; } } return ret_val; } void angle_aberration_CMF_to_LF (rpacket_t *packet, const storage_model_t *storage) { if (storage->full_relativity) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; double mu_0 = rpacket_get_mu (packet); rpacket_set_mu (packet, (mu_0 + beta) / (1.0 + beta * mu_0)); } } /** Transform the lab frame direction cosine to the CMF * * @param packet * @param storage * @param mu lab frame direction cosine * * @return CMF direction cosine */ double angle_aberration_LF_to_CMF (rpacket_t *packet, const storage_model_t *storage, double mu) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; return (mu - beta) / (1.0 - beta * mu); } double rpacket_doppler_factor (const rpacket_t *packet, const storage_model_t *storage) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; if (!storage->full_relativity) { return 1.0 - rpacket_get_mu (packet) * 
beta; } else { return (1.0 - rpacket_get_mu (packet) * beta) / sqrt (1 - beta * beta); } } double rpacket_inverse_doppler_factor (const rpacket_t *packet, const storage_model_t *storage) { double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C; if (!storage->full_relativity) { return 1.0 / (1.0 - rpacket_get_mu (packet) * beta); } else { return (1.0 + rpacket_get_mu (packet) * beta) / sqrt (1 - beta * beta); } } double bf_cross_section (const storage_model_t * storage, int64_t continuum_id, double comov_nu) { double bf_xsect; double *x_sect = storage->photo_xsect[continuum_id]->x_sect; double *nu = storage->photo_xsect[continuum_id]->nu; switch (storage->bf_treatment) { case LIN_INTERPOLATION: { int64_t result; tardis_error_t error = binary_search (nu, comov_nu, 0, storage->photo_xsect[continuum_id]->no_of_points - 1, &result); if (error == TARDIS_ERROR_BOUNDS_ERROR) { bf_xsect = 0.0; } else { bf_xsect = x_sect[result-1] + (comov_nu - nu[result-1]) / (nu[result] - nu[result-1]) * (x_sect[result] - x_sect[result-1]); } break; } case HYDROGENIC: { double nu_ratio = nu[0] / comov_nu; bf_xsect = x_sect[0] * nu_ratio * nu_ratio * nu_ratio; break; } default: fprintf (stderr, "(%d) is not a valid bound-free cross section treatment.\n", storage->bf_treatment); exit(1); } return bf_xsect; } void calculate_chi_bf (rpacket_t * packet, storage_model_t * storage) { double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = rpacket_get_nu (packet) * doppler_factor; int64_t no_of_continuum_edges = storage->no_of_edges; int64_t current_continuum_id; line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id); rpacket_set_current_continuum_id (packet, current_continuum_id); int64_t shell_id = rpacket_get_current_shell_id (packet); double T = storage->t_electrons[shell_id]; double boltzmann_factor = exp (-(H * comov_nu) / (KB * T)); double bf_helper = 0; for(int64_t i = current_continuum_id; i < 
no_of_continuum_edges; i++) { // get the level population for the level ijk in the current shell: double l_pop = storage->l_pop[shell_id * no_of_continuum_edges + i]; // get the level population ratio \frac{n_{0,j+1,k}}{n_{i,j,k}} \frac{n_{i,j,k}}{n_{0,j+1,k}}^{*}: double l_pop_r = storage->l_pop_r[shell_id * no_of_continuum_edges + i]; double bf_x_sect = bf_cross_section (storage, i, comov_nu); if (bf_x_sect == 0.0) { break; } bf_helper += l_pop * bf_x_sect * (1.0 - l_pop_r * boltzmann_factor) * doppler_factor; packet->chi_bf_tmp_partial[i] = bf_helper; } rpacket_set_chi_boundfree (packet, bf_helper); } void calculate_chi_ff (rpacket_t * packet, const storage_model_t * storage) { double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = rpacket_get_nu (packet) * doppler_factor; int64_t shell_id = rpacket_get_current_shell_id (packet); double T = storage->t_electrons[shell_id]; double boltzmann_factor = exp (-(H * comov_nu) / KB / T); double chi_ff_factor = storage->chi_ff_factor[shell_id]; double chi_ff = chi_ff_factor * (1 - boltzmann_factor) * pow (comov_nu, -3); rpacket_set_chi_freefree (packet, chi_ff * doppler_factor); } void compute_distance2boundary (rpacket_t * packet, const storage_model_t * storage) { double r = rpacket_get_r (packet); double mu = rpacket_get_mu (packet); double r_outer = storage->r_outer[rpacket_get_current_shell_id (packet)]; double r_inner = storage->r_inner[rpacket_get_current_shell_id (packet)]; double check, distance; if (mu > 0.0) { // direction outward rpacket_set_next_shell_id (packet, 1); distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu); } else { // going inward if ( (check = r_inner * r_inner + (r * r * (mu * mu - 1.0)) )>= 0.0) { // hit inner boundary rpacket_set_next_shell_id (packet, -1); distance = - r * mu - sqrt (check); } else { // miss inner boundary rpacket_set_next_shell_id (packet, 1); distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu); } } 
rpacket_set_d_boundary (packet, distance); } tardis_error_t compute_distance2line (rpacket_t * packet, const storage_model_t * storage) { if (!rpacket_get_last_line (packet)) { double r = rpacket_get_r (packet); double mu = rpacket_get_mu (packet); double nu = rpacket_get_nu (packet); double nu_line = rpacket_get_nu_line (packet); double distance, nu_diff; double ct = storage->time_explosion * C; double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = nu * doppler_factor; if ( (nu_diff = comov_nu - nu_line) >= 0) { if (!storage->full_relativity) { distance = (nu_diff / nu) * ct; } else { double nu_r = nu_line / nu; distance = - mu * r + (ct - nu_r * nu_r * sqrt(ct * ct - (1 + r * r * (1 - mu * mu) * (1 + pow (nu_r, -2))))) / (1 + nu_r * nu_r); } rpacket_set_d_line (packet, distance); return TARDIS_ERROR_OK; } else { if (rpacket_get_next_line_id (packet) == storage->no_of_lines - 1) { fprintf (stderr, "last_line = %f\n", storage-> line_list_nu[rpacket_get_next_line_id (packet) - 1]); fprintf (stderr, "Last line in line list reached!"); } else if (rpacket_get_next_line_id (packet) == 0) { fprintf (stderr, "First line in line list!"); fprintf (stderr, "next_line = %f\n", storage-> line_list_nu[rpacket_get_next_line_id (packet) + 1]); } else { fprintf (stderr, "last_line = %f\n", storage-> line_list_nu[rpacket_get_next_line_id (packet) - 1]); fprintf (stderr, "next_line = %f\n", storage-> line_list_nu[rpacket_get_next_line_id (packet) + 1]); } fprintf (stderr, "ERROR: Comoving nu less than nu_line!\n"); fprintf (stderr, "comov_nu = %f\n", comov_nu); fprintf (stderr, "nu_line = %f\n", nu_line); fprintf (stderr, "(comov_nu - nu_line) / nu_line = %f\n", (comov_nu - nu_line) / nu_line); fprintf (stderr, "r = %f\n", r); fprintf (stderr, "mu = %f\n", mu); fprintf (stderr, "nu = %f\n", nu); fprintf (stderr, "doppler_factor = %f\n", doppler_factor); fprintf (stderr, "cur_zone_id = %" PRIi64 "\n", rpacket_get_current_shell_id (packet)); return 
TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE; } } else { rpacket_set_d_line (packet, MISS_DISTANCE); return TARDIS_ERROR_OK; } } void compute_distance2continuum (rpacket_t * packet, storage_model_t * storage) { double chi_continuum, d_continuum; double chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] * storage->sigma_thomson; if (storage->full_relativity) { chi_electron *= rpacket_doppler_factor (packet, storage); } if (storage->cont_status == CONTINUUM_ON) { if (packet->compute_chi_bf) { calculate_chi_bf (packet, storage); calculate_chi_ff (packet, storage); } else { packet->compute_chi_bf=true; } chi_continuum = rpacket_get_chi_boundfree (packet) + rpacket_get_chi_freefree (packet) + chi_electron; d_continuum = rpacket_get_tau_event (packet) / chi_continuum; } else { chi_continuum = chi_electron; d_continuum = storage->inverse_electron_densities[rpacket_get_current_shell_id (packet)] * storage->inverse_sigma_thomson * rpacket_get_tau_event (packet); } if (rpacket_get_virtual_packet(packet) > 0) { //Set all continuum distances to MISS_DISTANCE in case of an virtual_packet d_continuum = MISS_DISTANCE; packet->compute_chi_bf = false; } else { // fprintf(stderr, "--------\n"); // fprintf(stderr, "nu = %e \n", rpacket_get_nu(packet)); // fprintf(stderr, "chi_electron = %e\n", chi_electron); // fprintf(stderr, "chi_boundfree = %e\n", calculate_chi_bf(packet, storage)); // fprintf(stderr, "chi_line = %e \n", rpacket_get_tau_event(packet) / rpacket_get_d_line(packet)); // fprintf(stderr, "--------\n"); //rpacket_set_chi_freefree(packet, chi_freefree); rpacket_set_chi_electron (packet, chi_electron); } rpacket_set_chi_continuum (packet, chi_continuum); rpacket_set_d_continuum (packet, d_continuum); } void macro_atom (rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state) { int emit = 0, i = 0, offset = -1; uint64_t activate_level = rpacket_get_macro_atom_activation_level (packet); while (emit >= 0) { double event_random = 
rk_double (mt_state); i = storage->macro_block_references[activate_level] - 1; double p = 0.0; offset = storage->transition_probabilities_nd * rpacket_get_current_shell_id (packet); do { ++i; p += storage->transition_probabilities[offset + i]; } while (p <= event_random); emit = storage->transition_type[i]; activate_level = storage->destination_level_id[i]; } switch (emit) { case BB_EMISSION: line_emission (packet, storage, storage->transition_line_id[i], mt_state); break; case BF_EMISSION: rpacket_set_current_continuum_id (packet, storage->transition_line_id[i]); storage->last_line_interaction_out_id[rpacket_get_id (packet)] = rpacket_get_current_continuum_id (packet); continuum_emission (packet, storage, mt_state, sample_nu_free_bound, 3); break; case FF_EMISSION: continuum_emission (packet, storage, mt_state, sample_nu_free_free, 4); break; case ADIABATIC_COOLING: storage->last_interaction_type[rpacket_get_id (packet)] = 5; rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED); break; default: fprintf (stderr, "This process for macro-atom deactivation should not exist! 
(emit = %d)\n", emit); exit(1); } } void move_packet (rpacket_t * packet, storage_model_t * storage, double distance) { double doppler_factor = rpacket_doppler_factor (packet, storage); if (distance > 0.0) { double r = rpacket_get_r (packet); double new_r = sqrt (r * r + distance * distance + 2.0 * r * distance * rpacket_get_mu (packet)); rpacket_set_mu (packet, (rpacket_get_mu (packet) * r + distance) / new_r); rpacket_set_r (packet, new_r); if (rpacket_get_virtual_packet (packet) <= 0) { double comov_energy = rpacket_get_energy (packet) * doppler_factor; double comov_nu = rpacket_get_nu (packet) * doppler_factor; if (storage->full_relativity) { distance *= doppler_factor; } #ifdef WITHOPENMP #pragma omp atomic #endif storage->js[rpacket_get_current_shell_id (packet)] += comov_energy * distance; #ifdef WITHOPENMP #pragma omp atomic #endif storage->nubars[rpacket_get_current_shell_id (packet)] += comov_energy * distance * comov_nu; if (storage->cont_status) { increment_continuum_estimators(packet, storage, distance, comov_nu, comov_energy); } } } } void increment_continuum_estimators (const rpacket_t * packet, storage_model_t * storage, double distance, double comov_nu, double comov_energy) { int64_t current_continuum_id; int64_t no_of_continuum_edges = storage->no_of_edges; int64_t shell_id = rpacket_get_current_shell_id (packet); line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id); double T = storage->t_electrons[shell_id]; double boltzmann_factor = exp (-(H * comov_nu) / (KB * T)); #ifdef WITHOPENMP #pragma omp atomic #endif storage->ff_heating_estimator[shell_id] += comov_energy * distance * rpacket_get_chi_freefree (packet); for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++) { double bf_xsect = bf_cross_section (storage, i, comov_nu); int64_t photo_ion_idx = i * storage->no_of_shells + shell_id; double photo_ion_estimator_helper = comov_energy * distance * bf_xsect / comov_nu; double 
bf_heating_estimator_helper = comov_energy * distance * bf_xsect * (1. - storage->continuum_list_nu[i] / comov_nu); #ifdef WITHOPENMP #pragma omp atomic #endif storage->photo_ion_estimator[photo_ion_idx] += photo_ion_estimator_helper; #ifdef WITHOPENMP #pragma omp atomic #endif storage->stim_recomb_estimator[photo_ion_idx] += photo_ion_estimator_helper * boltzmann_factor; #ifdef WITHOPENMP #pragma omp atomic #endif storage->bf_heating_estimator[photo_ion_idx] += bf_heating_estimator_helper; #ifdef WITHOPENMP #pragma omp atomic #endif storage->stim_recomb_cooling_estimator[photo_ion_idx] += bf_heating_estimator_helper * boltzmann_factor; if (photo_ion_estimator_helper != 0.0) { #ifdef WITHOPENMP #pragma omp atomic #endif storage->photo_ion_estimator_statistics[photo_ion_idx] += 1; } else { break; } } } double get_increment_j_blue_estimator_energy (const rpacket_t * packet, const storage_model_t * storage, double d_line) { double energy; if (storage->full_relativity) { // Accurate up to a factor 1 / gamma energy = rpacket_get_energy (packet); } else { double r = rpacket_get_r (packet); double r_interaction = sqrt (r * r + d_line * d_line + 2.0 * r * d_line * rpacket_get_mu (packet)); double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction; double doppler_factor = 1.0 - mu_interaction * r_interaction * storage->inverse_time_explosion * INVERSE_C; energy = rpacket_get_energy (packet) * doppler_factor; } return energy; } void increment_j_blue_estimator (const rpacket_t * packet, storage_model_t * storage, double d_line, int64_t j_blue_idx) { if (storage->line_lists_j_blues != NULL) { double energy = get_increment_j_blue_estimator_energy (packet, storage, d_line); #ifdef WITHOPENMP #pragma omp atomic #endif storage->line_lists_j_blues[j_blue_idx] += energy / rpacket_get_nu (packet); } } void increment_Edotlu_estimator (const rpacket_t * packet, storage_model_t * storage, double d_line, int64_t line_idx) { if (storage->line_lists_Edotlu != NULL) { 
double energy = get_increment_j_blue_estimator_energy (packet, storage, d_line); #ifdef WITHOPENMP #pragma omp atomic #endif storage->line_lists_Edotlu[line_idx] += energy; } } int64_t montecarlo_one_packet (storage_model_t * storage, rpacket_t * packet, int64_t virtual_mode, rk_state *mt_state) { int64_t reabsorbed=-1; if (virtual_mode == 0) { reabsorbed = montecarlo_one_packet_loop (storage, packet, 0, mt_state); } else { if ((rpacket_get_nu (packet) > storage->spectrum_virt_start_nu) && (rpacket_get_nu(packet) < storage->spectrum_virt_end_nu)) { for (int64_t i = 0; i < rpacket_get_virtual_packet_flag (packet); i++) { double weight; rpacket_t virt_packet = *packet; double mu_min; if (rpacket_get_r(&virt_packet) > storage->r_inner[0]) { mu_min = -1.0 * sqrt (1.0 - (storage->r_inner[0] / rpacket_get_r(&virt_packet)) * (storage->r_inner[0] / rpacket_get_r(&virt_packet))); if (storage->full_relativity) { // Need to transform the angular size of the photosphere into the CMF mu_min = angle_aberration_LF_to_CMF (&virt_packet, storage, mu_min); } } else { mu_min = 0.0; } double mu_bin = (1.0 - mu_min) / rpacket_get_virtual_packet_flag (packet); rpacket_set_mu(&virt_packet,mu_min + (i + rk_double (mt_state)) * mu_bin); switch (virtual_mode) { case -2: weight = 1.0 / rpacket_get_virtual_packet_flag (packet); break; case -1: weight = 2.0 * rpacket_get_mu(&virt_packet) / rpacket_get_virtual_packet_flag (packet); break; case 1: weight = (1.0 - mu_min) / 2.0 / rpacket_get_virtual_packet_flag (packet); break; default: fprintf (stderr, "Something has gone horribly wrong!\n"); // FIXME MR: we need to somehow signal an error here // I'm adding an exit() here to inform the compiler about the impossible path exit(1); } angle_aberration_CMF_to_LF (&virt_packet, storage); double doppler_factor_ratio = rpacket_doppler_factor (packet, storage) / rpacket_doppler_factor (&virt_packet, storage); rpacket_set_energy(&virt_packet, rpacket_get_energy (packet) * doppler_factor_ratio); 
rpacket_set_nu(&virt_packet,rpacket_get_nu (packet) * doppler_factor_ratio); reabsorbed = montecarlo_one_packet_loop (storage, &virt_packet, 1, mt_state); #ifdef WITH_VPACKET_LOGGING #ifdef WITHOPENMP #pragma omp critical { #endif // WITHOPENMP if (storage->virt_packet_count >= storage->virt_array_size) { storage->virt_array_size *= 2; storage->virt_packet_nus = safe_realloc(storage->virt_packet_nus, sizeof(double) * storage->virt_array_size); storage->virt_packet_energies = safe_realloc(storage->virt_packet_energies, sizeof(double) * storage->virt_array_size); storage->virt_packet_last_interaction_in_nu = safe_realloc(storage->virt_packet_last_interaction_in_nu, sizeof(double) * storage->virt_array_size); storage->virt_packet_last_interaction_type = safe_realloc(storage->virt_packet_last_interaction_type, sizeof(int64_t) * storage->virt_array_size); storage->virt_packet_last_line_interaction_in_id = safe_realloc(storage->virt_packet_last_line_interaction_in_id, sizeof(int64_t) * storage->virt_array_size); storage->virt_packet_last_line_interaction_out_id = safe_realloc(storage->virt_packet_last_line_interaction_out_id, sizeof(int64_t) * storage->virt_array_size); } storage->virt_packet_nus[storage->virt_packet_count] = rpacket_get_nu(&virt_packet); storage->virt_packet_energies[storage->virt_packet_count] = rpacket_get_energy(&virt_packet) * weight; storage->virt_packet_last_interaction_in_nu[storage->virt_packet_count] = storage->last_interaction_in_nu[rpacket_get_id (packet)]; storage->virt_packet_last_interaction_type[storage->virt_packet_count] = storage->last_interaction_type[rpacket_get_id (packet)]; storage->virt_packet_last_line_interaction_in_id[storage->virt_packet_count] = storage->last_line_interaction_in_id[rpacket_get_id (packet)]; storage->virt_packet_last_line_interaction_out_id[storage->virt_packet_count] = storage->last_line_interaction_out_id[rpacket_get_id (packet)]; storage->virt_packet_count += 1; #ifdef WITHOPENMP } #endif // WITHOPENMP 
#endif // WITH_VPACKET_LOGGING if ((rpacket_get_nu(&virt_packet) < storage->spectrum_end_nu) && (rpacket_get_nu(&virt_packet) > storage->spectrum_start_nu)) { #ifdef WITHOPENMP #pragma omp critical { #endif // WITHOPENMP int64_t virt_id_nu = floor ((rpacket_get_nu(&virt_packet) - storage->spectrum_start_nu) / storage->spectrum_delta_nu); storage->spectrum_virt_nu[virt_id_nu] += rpacket_get_energy(&virt_packet) * weight; #ifdef WITHOPENMP } #endif // WITHOPENMP } } } else { return 1; } } return reabsorbed; } void move_packet_across_shell_boundary (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state) { move_packet (packet, storage, distance); if (rpacket_get_virtual_packet (packet) > 0) { double delta_tau_event = rpacket_get_chi_continuum(packet) * distance; rpacket_set_tau_event (packet, rpacket_get_tau_event (packet) + delta_tau_event); packet->compute_chi_bf = true; } else { rpacket_reset_tau_event (packet, mt_state); } if ((rpacket_get_current_shell_id (packet) < storage->no_of_shells - 1 && rpacket_get_next_shell_id (packet) == 1) || (rpacket_get_current_shell_id (packet) > 0 && rpacket_get_next_shell_id (packet) == -1)) { rpacket_set_current_shell_id (packet, rpacket_get_current_shell_id (packet) + rpacket_get_next_shell_id (packet)); } else if (rpacket_get_next_shell_id (packet) == 1) { rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED); } else if ((storage->reflective_inner_boundary == 0) || (rk_double (mt_state) > storage->inner_boundary_albedo)) { rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED); } else { double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = rpacket_get_nu (packet) * doppler_factor; double comov_energy = rpacket_get_energy (packet) * doppler_factor; // TODO: correct rpacket_set_mu (packet, rk_double (mt_state)); double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); rpacket_set_nu (packet, comov_nu * inverse_doppler_factor); 
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor); if (rpacket_get_virtual_packet_flag (packet) > 0) { montecarlo_one_packet (storage, packet, -2, mt_state); } } } void montecarlo_thomson_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state) { move_packet (packet, storage, distance); double doppler_factor = rpacket_doppler_factor (packet, storage); double comov_nu = rpacket_get_nu (packet) * doppler_factor; double comov_energy = rpacket_get_energy (packet) * doppler_factor; rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0); double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); rpacket_set_nu (packet, comov_nu * inverse_doppler_factor); rpacket_set_energy (packet, comov_energy * inverse_doppler_factor); rpacket_reset_tau_event (packet, mt_state); storage->last_interaction_type[rpacket_get_id (packet)] = 1; angle_aberration_CMF_to_LF (packet, storage); if (rpacket_get_virtual_packet_flag (packet) > 0) { create_vpacket (storage, packet, mt_state); } } void montecarlo_bound_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state) { // current position in list of continuum edges -> indicates which bound-free processes are possible int64_t ccontinuum = rpacket_get_current_continuum_id (packet); // Determine in which continuum the bf-absorption occurs double chi_bf = rpacket_get_chi_boundfree (packet); double zrand = rk_double (mt_state); double zrand_x_chibf = zrand * chi_bf; while ((ccontinuum < storage->no_of_edges - 1) && (packet->chi_bf_tmp_partial[ccontinuum] <= zrand_x_chibf)) { ccontinuum++; } rpacket_set_current_continuum_id (packet, ccontinuum); /* For consistency reasons the branching between ionization and thermal energy is determined using the comoving frequency at the initial position instead of the frequency at the point of interaction */ double comov_nu = rpacket_get_nu (packet) * rpacket_doppler_factor (packet, storage); /* 
Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation in the co-moving frame. */ move_packet (packet, storage, distance); double old_doppler_factor = rpacket_doppler_factor (packet, storage); rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0); double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); double comov_energy = rpacket_get_energy (packet) * old_doppler_factor; rpacket_set_energy (packet, comov_energy * inverse_doppler_factor); storage->last_interaction_type[rpacket_get_id (packet)] = 3; // last interaction was a bf-absorption storage->last_line_interaction_in_id[rpacket_get_id (packet)] = ccontinuum; // Convert the rpacket to thermal or ionization energy zrand = rk_double (mt_state); int64_t activate_level = (zrand < storage->continuum_list_nu[ccontinuum] / comov_nu) ? storage->cont_edge2macro_level[ccontinuum] : storage->kpacket2macro_level; rpacket_set_macro_atom_activation_level (packet, activate_level); macro_atom (packet, storage, mt_state); } void montecarlo_free_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state) { /* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation in the co-moving frame. 
*/ move_packet (packet, storage, distance); double old_doppler_factor = rpacket_doppler_factor (packet, storage); rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0); double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); double comov_energy = rpacket_get_energy (packet) * old_doppler_factor; rpacket_set_energy (packet, comov_energy * inverse_doppler_factor); storage->last_interaction_type[rpacket_get_id (packet)] = 4; // last interaction was a ff-absorption // Create a k-packet rpacket_set_macro_atom_activation_level (packet, storage->kpacket2macro_level); macro_atom (packet, storage, mt_state); } double sample_nu_free_free (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state) { int64_t shell_id = rpacket_get_current_shell_id (packet); double T = storage->t_electrons[shell_id]; double zrand = rk_double (mt_state); return -KB * T / H * log(zrand); // Lucy 2003 MC II Eq.41 } double sample_nu_free_bound (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state) { int64_t continuum_id = rpacket_get_current_continuum_id (packet); double th_frequency = storage->continuum_list_nu[continuum_id]; int64_t shell_id = rpacket_get_current_shell_id (packet); double T = storage->t_electrons[shell_id]; double zrand = rk_double (mt_state); return th_frequency * (1 - (KB * T / H / th_frequency * log(zrand))); // Lucy 2003 MC II Eq.26 } void montecarlo_line_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state) { uint64_t next_line_id = rpacket_get_next_line_id (packet); uint64_t line2d_idx = next_line_id + storage->no_of_lines * rpacket_get_current_shell_id (packet); if (rpacket_get_virtual_packet (packet) == 0) { increment_j_blue_estimator (packet, storage, distance, line2d_idx); increment_Edotlu_estimator (packet, storage, distance, line2d_idx); } double tau_line = storage->line_lists_tau_sobolevs[line2d_idx]; double tau_continuum = 
rpacket_get_chi_continuum(packet) * distance; double tau_combined = tau_line + tau_continuum; //rpacket_set_next_line_id (packet, rpacket_get_next_line_id (packet) + 1); if (next_line_id + 1 == storage->no_of_lines) { rpacket_set_last_line (packet, true); } if (rpacket_get_virtual_packet (packet) > 0) { rpacket_set_tau_event (packet, rpacket_get_tau_event (packet) + tau_line); rpacket_set_next_line_id (packet, next_line_id + 1); test_for_close_line (packet, storage); } else if (rpacket_get_tau_event (packet) < tau_combined) { // Line absorption occurs move_packet (packet, storage, distance); double old_doppler_factor = rpacket_doppler_factor (packet, storage); rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0); double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); double comov_energy = rpacket_get_energy (packet) * old_doppler_factor; rpacket_set_energy (packet, comov_energy * inverse_doppler_factor); storage->last_interaction_in_nu[rpacket_get_id (packet)] = rpacket_get_nu (packet); storage->last_line_interaction_in_id[rpacket_get_id (packet)] = next_line_id; storage->last_line_interaction_shell_id[rpacket_get_id (packet)] = rpacket_get_current_shell_id (packet); storage->last_interaction_type[rpacket_get_id (packet)] = 2; if (storage->line_interaction_id == 0) { line_emission (packet, storage, next_line_id, mt_state); } else if (storage->line_interaction_id >= 1) { rpacket_set_macro_atom_activation_level (packet, storage->line2macro_level_upper[next_line_id]); macro_atom (packet, storage, mt_state); } } else { // Packet passes line without interacting rpacket_set_tau_event (packet, rpacket_get_tau_event (packet) - tau_line); rpacket_set_next_line_id (packet, next_line_id + 1); packet->compute_chi_bf = false; test_for_close_line (packet, storage); } } void line_emission (rpacket_t * packet, storage_model_t * storage, int64_t emission_line_id, rk_state *mt_state) { double inverse_doppler_factor = rpacket_inverse_doppler_factor 
(packet, storage); storage->last_line_interaction_out_id[rpacket_get_id (packet)] = emission_line_id; if (storage->cont_status == CONTINUUM_ON) { storage->last_interaction_out_type[rpacket_get_id (packet)] = 2; } rpacket_set_nu (packet, storage->line_list_nu[emission_line_id] * inverse_doppler_factor); rpacket_set_nu_line (packet, storage->line_list_nu[emission_line_id]); rpacket_set_next_line_id (packet, emission_line_id + 1); rpacket_reset_tau_event (packet, mt_state); angle_aberration_CMF_to_LF (packet, storage); if (rpacket_get_virtual_packet_flag (packet) > 0) { bool virtual_close_line = false; if (!rpacket_get_last_line (packet) && fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] - rpacket_get_nu_line (packet)) < (rpacket_get_nu_line (packet)* 1e-7)) { virtual_close_line = true; } // QUESTIONABLE!!! bool old_close_line = rpacket_get_close_line (packet); rpacket_set_close_line (packet, virtual_close_line); create_vpacket (storage, packet, mt_state); rpacket_set_close_line (packet, old_close_line); virtual_close_line = false; } test_for_close_line (packet, storage); } void test_for_close_line (rpacket_t * packet, const storage_model_t * storage) { if (!rpacket_get_last_line (packet) && fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] - rpacket_get_nu_line (packet)) < (rpacket_get_nu_line (packet)* 1e-7)) { rpacket_set_close_line (packet, true); } } void continuum_emission (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state, pt2sample_nu sample_nu_continuum, int64_t emission_type_id) { double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage); double nu_comov = sample_nu_continuum (packet, storage, mt_state); rpacket_set_nu (packet, nu_comov * inverse_doppler_factor); rpacket_reset_tau_event (packet, mt_state); storage->last_interaction_out_type[rpacket_get_id (packet)] = emission_type_id; // Have to find current position in line list int64_t current_line_id; line_search (storage->line_list_nu, 
nu_comov, storage->no_of_lines, &current_line_id); bool last_line = (current_line_id == storage->no_of_lines); rpacket_set_last_line (packet, last_line); rpacket_set_next_line_id (packet, current_line_id); angle_aberration_CMF_to_LF (packet, storage); if (rpacket_get_virtual_packet_flag (packet) > 0) { create_vpacket (storage, packet, mt_state); } } static void montecarlo_compute_distances (rpacket_t * packet, storage_model_t * storage) { // Check if the last line was the same nu as the current line. if (rpacket_get_close_line (packet)) { // If so set the distance to the line to 0.0 rpacket_set_d_line (packet, 0.0); // Reset close_line. rpacket_set_close_line (packet, false); } else { compute_distance2boundary (packet, storage); compute_distance2line (packet, storage); // FIXME MR: return status of compute_distance2line() is ignored compute_distance2continuum (packet, storage); } } montecarlo_event_handler_t get_event_handler (rpacket_t * packet, storage_model_t * storage, double *distance, rk_state *mt_state) { montecarlo_compute_distances (packet, storage); double d_boundary = rpacket_get_d_boundary (packet); double d_continuum = rpacket_get_d_continuum (packet); double d_line = rpacket_get_d_line (packet); montecarlo_event_handler_t handler; if (d_line <= d_boundary && d_line <= d_continuum) { *distance = d_line; handler = &montecarlo_line_scatter; } else if (d_boundary <= d_continuum) { *distance = d_boundary; handler = &move_packet_across_shell_boundary; } else { *distance = d_continuum; handler = montecarlo_continuum_event_handler (packet, storage, mt_state); } return handler; } montecarlo_event_handler_t montecarlo_continuum_event_handler (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state) { if (storage->cont_status) { double zrand_x_chi_cont = rk_double (mt_state) * rpacket_get_chi_continuum (packet); double chi_th = rpacket_get_chi_electron (packet); double chi_bf = rpacket_get_chi_boundfree (packet); if (zrand_x_chi_cont < chi_th) { return 
&montecarlo_thomson_scatter; } else if (zrand_x_chi_cont < chi_th + chi_bf) { return &montecarlo_bound_free_scatter; } else { return &montecarlo_free_free_scatter; } } else { return &montecarlo_thomson_scatter; } } int64_t montecarlo_one_packet_loop (storage_model_t * storage, rpacket_t * packet, int64_t virtual_packet, rk_state *mt_state) { rpacket_set_tau_event (packet, 0.0); rpacket_set_nu_line (packet, 0.0); rpacket_set_virtual_packet (packet, virtual_packet); rpacket_set_status (packet, TARDIS_PACKET_STATUS_IN_PROCESS); // Initializing tau_event if it's a real packet. if (virtual_packet == 0) { rpacket_reset_tau_event (packet,mt_state); } // For a virtual packet tau_event is the sum of all the tau's that the packet passes. while (rpacket_get_status (packet) == TARDIS_PACKET_STATUS_IN_PROCESS) { // Check if we are at the end of line list. if (!rpacket_get_last_line (packet)) { rpacket_set_nu_line (packet, storage-> line_list_nu[rpacket_get_next_line_id (packet)]); } double distance; get_event_handler (packet, storage, &distance, mt_state) (packet, storage, distance, mt_state); if (virtual_packet > 0 && rpacket_get_tau_event (packet) > storage->tau_russian) { double event_random = rk_double (mt_state); if (event_random > storage->survival_probability) { rpacket_set_energy(packet, 0.0); rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED); } else { rpacket_set_energy(packet, rpacket_get_energy (packet) / storage->survival_probability * exp (-1.0 * rpacket_get_tau_event (packet))); rpacket_set_tau_event (packet, 0.0); } } } if (virtual_packet > 0) { rpacket_set_energy (packet, rpacket_get_energy (packet) * exp (-1.0 * rpacket_get_tau_event (packet))); } return rpacket_get_status (packet) == TARDIS_PACKET_STATUS_REABSORBED ? 
1 : 0; } void montecarlo_main_loop(storage_model_t * storage, int64_t virtual_packet_flag, int nthreads, unsigned long seed) { int64_t finished_packets = 0; storage->virt_packet_count = 0; #ifdef WITH_VPACKET_LOGGING storage->virt_packet_nus = (double *)safe_malloc(sizeof(double) * storage->no_of_packets); storage->virt_packet_energies = (double *)safe_malloc(sizeof(double) * storage->no_of_packets); storage->virt_packet_last_interaction_in_nu = (double *)safe_malloc(sizeof(double) * storage->no_of_packets); storage->virt_packet_last_interaction_type = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets); storage->virt_packet_last_line_interaction_in_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets); storage->virt_packet_last_line_interaction_out_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets); storage->virt_array_size = storage->no_of_packets; #endif // WITH_VPACKET_LOGGING #ifdef WITHOPENMP omp_set_dynamic(0); if (nthreads > 0) { omp_set_num_threads(nthreads); } #pragma omp parallel firstprivate(finished_packets) { rk_state mt_state; rk_seed (seed + omp_get_thread_num(), &mt_state); #pragma omp master { fprintf(stderr, "Running with OpenMP - %d threads\n", omp_get_num_threads()); print_progress(0, storage->no_of_packets); } #else rk_state mt_state; rk_seed (seed, &mt_state); fprintf(stderr, "Running without OpenMP\n"); #endif int64_t chi_bf_tmp_size = (storage->cont_status) ? 
storage->no_of_edges : 0; double *chi_bf_tmp_partial = safe_malloc(sizeof(double) * chi_bf_tmp_size); #pragma omp for for (int64_t packet_index = 0; packet_index < storage->no_of_packets; ++packet_index) { int reabsorbed = 0; rpacket_t packet; rpacket_set_id(&packet, packet_index); rpacket_init(&packet, storage, packet_index, virtual_packet_flag, chi_bf_tmp_partial); if (virtual_packet_flag > 0) { reabsorbed = montecarlo_one_packet(storage, &packet, -1, &mt_state); } reabsorbed = montecarlo_one_packet(storage, &packet, 0, &mt_state); storage->output_nus[packet_index] = rpacket_get_nu(&packet); if (reabsorbed == 1) { storage->output_energies[packet_index] = -rpacket_get_energy(&packet); } else { storage->output_energies[packet_index] = rpacket_get_energy(&packet); } if ( ++finished_packets%100 == 0 ) { #ifdef WITHOPENMP // WARNING: This only works with a static sheduler and gives an approximation of progress. // The alternative would be to have a shared variable but that could potentially decrease performance when using many threads. if (omp_get_thread_num() == 0 ) print_progress(finished_packets * omp_get_num_threads(), storage->no_of_packets); #else print_progress(finished_packets, storage->no_of_packets); #endif } } free(chi_bf_tmp_partial); #ifdef WITHOPENMP } #endif print_progress(storage->no_of_packets, storage->no_of_packets); fprintf(stderr,"\n"); } void create_vpacket (storage_model_t * storage, rpacket_t * packet, rk_state *mt_state) { if (storage->enable_biasing) { int64_t shell_id = rpacket_get_current_shell_id(packet); double tau_bias = (storage->tau_bias[shell_id + 1] + (storage->tau_bias[shell_id] - storage->tau_bias[shell_id + 1]) * (storage->r_outer[shell_id] - rpacket_get_r (packet)) / (storage->r_outer[shell_id] - storage->r_inner[shell_id])); double vpacket_prob = exp(-tau_bias); double event_random = rk_double (mt_state); if (event_random < vpacket_prob) { packet->vpacket_weight = 1. 
/ vpacket_prob; montecarlo_one_packet (storage, packet, 1, mt_state); } } else { montecarlo_one_packet (storage, packet, 1, mt_state); } }
GB_unop__one_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__one_bool_bool
// op(A') function:  GB_unop_tran__one_bool_bool

// C type:   bool
// A type:   bool
// cast:     ;
// unaryop:  cij = true

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    ; ;                             \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ;                             \
    Cx [pC] = true ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ONE || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Note: the ONE operator ignores the values of A entirely; the loop body
// reduces to writing `true` into each entry of Cx.

GrB_Info GB_unop_apply__one_bool_bool
(
    bool *Cx,       // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GB_GETA and GB_CAST both expand to no-ops for this operator
        ; ; ; ;
        Cx [p] = true ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is included
// textually and driven by the GB_* macros defined above.

GrB_Info GB_unop_tran__one_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here if (argv <= 1) { exit(1); } int arg1 = atoi(argv[0]); omp_set_num_threads(10); int Nthreads = 1; struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { long int seed = 0; int rank = omp_get_thread_num(); srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; //gererate two random numbers (use the thread id to offset drandData) drand48_r(drandData+0, &rand1); drand48_r(drandData+0, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); printf("Our estimate of pi is %g \n", pi); } } double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); free(drandData); return 0; }
opencl_keyring_fmt_plug.c
/*
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>,
 * Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com> and
 * Copyright (c) 2012-2014 magnum
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keyring;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keyring);
#else

#include <string.h>

#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "sha2.h"
#include "md5.h"
#include "stdint.h"

#define FORMAT_LABEL "keyring-opencl"
#define FORMAT_NAME "GNOME Keyring"
#define FORMAT_TAG "$keyring$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// NOTE(review): 55-8 presumably leaves room for salt bytes inside one
// SHA-256 block on the device side -- confirm against the kernel.
#define PLAINTEXT_LENGTH (55-8)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define SALTLEN 8

// glib-style aliases kept so code pasted from gnome-keyring compiles as-is
typedef unsigned char guchar; /* How many aliases do we need?! */
typedef unsigned int guint;
typedef int gint;

// Host<->device transfer layouts; these must match the kernel's structs.
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} keyring_password;

typedef struct {
	uint8_t key[16];
	uint8_t iv[16];
} keyring_hash;

typedef struct {
	uint32_t length;
	uint32_t iterations;
	uint8_t salt[SALTLEN];
} keyring_salt;

// per-candidate crack flags, plus a summary flag for the whole batch
static int *cracked;
static int any_cracked;

// Parsed form of one "$keyring$..." ciphertext (the current salt).
static struct custom_salt {
	unsigned int iterations;
	unsigned char salt[SALTLEN];
	unsigned int crypto_size;
	unsigned int inlined;
	unsigned char ct[LINE_BUFFER_SIZE / 2]; /* after hex conversion */
} *cur_salt;

// Self-test vectors: "$keyring$<salt>*<iterations>*<crypto size>*<inlined>*<ct>"
static struct fmt_tests keyring_tests[] = {
	{"$keyring$db1b562e453a0764*3221*16*0*02b5c084e4802369c42507300f2e5e56", "openwall"},
	{"$keyring$4f3f1557a7da17f5*2439*144*0*12215fabcff6782aa23605ab2cd843f7be9477b172b615eaa9130836f189d32ffda2e666747378f09c6e76ad817154daae83a36c0a0a35f991d40bcfcba3b7807ef57a0ce4c7f835bf34c6e358f0d66aa048d73dacaaaf6d7fa4b3510add6b88cc237000ff13cb4dbd132db33be3ea113bedeba80606f86662cc226af0dad789c703a7df5ad8700542e0f7a5e1f10cf0", "password"},
	{NULL}
};

// Host-side staging buffers and their OpenCL device counterparts.
static keyring_password *inbuffer;
static keyring_hash *outbuffer;
static keyring_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;

// Buffer byte sizes scale with the autotuned global work size.
#define insize (sizeof(keyring_password) * global_work_size)
#define outsize (sizeof(keyring_hash) * global_work_size)
#define settingsize (sizeof(keyring_salt))
#define cracked_size (sizeof(*cracked) * global_work_size)

#define STEP 0
#define SEED 256

// labels for the autotune timing report (one per profiled event)
static const char * warn[] = {
	"xfer: " , ", crypt: " , ", xfer: "
};

//This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */

/* Upper bound on the work-group size the auto-tuner is allowed to try. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/*
 * Allocate the host-side transfer buffers and the matching OpenCL device
 * buffers for `global_work_size` candidates, then bind the device buffers
 * to the kernel's three arguments (in, out, setting).
 */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
	cl_int cl_error;

	inbuffer = (keyring_password*) mem_calloc(1, insize);
	outbuffer = (keyring_hash*) mem_alloc(outsize);

	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY,
	    settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
	    &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
	    &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
	    &mem_setting), "Error while setting mem_salt kernel argument");
}

/*
 * Release device buffers and host buffers.  `cracked` doubles as the
 * "buffers are allocated" sentinel so a second call is a no-op.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Tear down kernel and program once the format is done (guarded by
   `autotuned` so it only runs after a successful reset()). */
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}

/* Lightweight init: remember self, prepare the device.  Heavy setup
   (kernel build, auto-tune) is deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the kernel on first use and run the shared auto-tuner. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];
		cl_int cl_error;

		snprintf(build_opts, sizeof(build_opts),
		         "-DPLAINTEXT_LENGTH=%d -DSALTLEN=%d",
		         PLAINTEXT_LENGTH, SALTLEN);
		opencl_init("$JOHN/kernels/keyring_kernel.cl", gpu_id, build_opts);
		crypt_kernel = clCreateKernel(program[gpu_id], "keyring", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keyring_password), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 1, 0,
		             cpu(device_info[gpu_id]) ? 500000000ULL : 1000000000ULL);
	}
}

/* Accept only short all-digit strings, so the later atoi() cannot
   overflow (atoi has UB on out-of-range input). */
static int looks_like_nice_int(char *p)
{
	// reasonability check + avoids atoi's UB
	if (strlen(p) > 9)
		return 0;
	for (; *p; p++)
		if (*p < '0' || *p > '9')
			return 0;
	return 1;
}

/*
 * Validate one "$keyring$salt*iterations*ctlen*inlined*hex" line.
 * Returns 1 if the line is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int ctlen, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (keeptr == NULL)
		goto err;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
		goto err;
	while (*p)
		if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
			goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* crypto size */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	ctlen = atoi(p);
	/* must fit the host-side ciphertext buffer; sizeof does not
	   dereference, so this is safe even while cur_salt is NULL */
	if (ctlen > sizeof(cur_salt->ct))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* inlined - unused? TODO */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if (ctlen > LINE_BUFFER_SIZE)
		goto err;
	if (hexlenl(p, &extra) != ctlen * 2 || extra)
		goto err;
	if (strlen(p) < 32)	/* this shouldn't happen for valid hashes */
		goto err;
	while (*p)
		if (atoi16l[ARCH_INDEX(*p++)] == 0x7f)
			goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse a (previously valid()-checked) line into a struct custom_salt.
 * Returns a pointer to a static buffer, as is conventional for
 * fmt_main::get_salt.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	if (!cur_salt)
		cur_salt = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$keyring$" */
	p = strtokm(ctcopy, "*");
	for (i = 0; i < SALTLEN; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
		    + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.crypto_size = atoi(p);
	p = strtokm(NULL, "*");
	cs.inlined = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.crypto_size; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
		    + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Stage the active salt and push it to the device. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, SALTLEN);
	currentsalt.length = SALTLEN;
	currentsalt.iterations = cur_salt->iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy setting to gpu");
}

/* Store one candidate (truncated to PLAINTEXT_LENGTH) for the kernel. */
static void keyring_set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

/* Return candidate `index` as a NUL-terminated string (static buffer). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/*
 * GNOME Keyring stores MD5(payload) in the first 16 bytes of the
 * plaintext; a correct key/IV therefore decrypts to a self-verifying
 * buffer.  Returns nonzero if the digest matches.
 */
static int verify_decrypted_buffer(unsigned char *buffer, int len)
{
	guchar digest[16];
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, buffer + 16, len - 16);
	MD5_Final(digest, &ctx);
	return memcmp(buffer, digest, 16) == 0;
}

/*
 * One crack iteration: upload candidates, derive key+IV per candidate on
 * the GPU, read results back, then AES-CBC decrypt and verify on the CPU
 * (OpenMP-parallel).  Returns the candidate count, per format contract.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE,
		0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");
	BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_FALSE,
		0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	/// Await completion of all the above
	BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char buffer[LINE_BUFFER_SIZE / 2];
		unsigned char iv[16];
		AES_KEY akey;
		unsigned char *p = outbuffer[index].iv;

		memcpy(iv, p, 16);
		/* valid() guarantees crypto_size <= sizeof(buffer) */
		memcpy(buffer, cur_salt->ct, cur_salt->crypto_size);
		memset(&akey, 0, sizeof(AES_KEY));
		if (AES_set_decrypt_key(outbuffer[index].key, 128, &akey) < 0) {
			fprintf(stderr, "AES_set_decrypt_key failed!\n");
		}
		AES_cbc_encrypt(buffer, buffer, cur_salt->crypto_size, &akey,
		                iv, AES_DECRYPT);
		if (verify_decrypted_buffer(buffer, cur_salt->crypto_size)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

/* This format has no binary; "comparison" is just the cracked flags. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all's MD5 verification is already exact. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Expose the iteration count as a tunable-cost value for reporting. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations;
}

struct fmt_main fmt_opencl_keyring = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		keyring_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		keyring_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
/* ==== psd.c — ImageMagick PSD coder (second file concatenated in this chunk) ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short int type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[256], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op) { const char *blend_mode; switch (op) { case ColorBurnCompositeOp: blend_mode = "idiv"; break; case ColorDodgeCompositeOp: blend_mode = "div "; break; case ColorizeCompositeOp: blend_mode = "colr"; break; case DarkenCompositeOp: blend_mode = "dark"; break; case DifferenceCompositeOp: blend_mode = "diff"; break; case DissolveCompositeOp: blend_mode = "diss"; break; case ExclusionCompositeOp: blend_mode = "smud"; break; case HardLightCompositeOp: blend_mode = "hLit"; break; case HardMixCompositeOp: blend_mode = "hMix"; break; case HueCompositeOp: blend_mode = "hue "; break; case LightenCompositeOp: blend_mode = "lite"; break; case LinearBurnCompositeOp: blend_mode = "lbrn"; break; case LinearDodgeCompositeOp:blend_mode = "lddg"; break; case LinearLightCompositeOp:blend_mode = "lLit"; break; case LuminizeCompositeOp: blend_mode = "lum "; break; case MultiplyCompositeOp: blend_mode = "mul "; break; case OverCompositeOp: blend_mode = "norm"; break; case OverlayCompositeOp: blend_mode = "over"; break; case PinLightCompositeOp: blend_mode = "pLit"; break; case SaturateCompositeOp: blend_mode = "sat "; break; case ScreenCompositeOp: blend_mode = "scrn"; break; case SoftLightCompositeOp: blend_mode = "sLit"; break; case VividLightCompositeOp: blend_mode = "vLit"; break; default: blend_mode = "norm"; } return(blend_mode); } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. 
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); image->alpha_trait=BlendPixelTrait; status=MagickTrue; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue, exception); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=background; SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } image->alpha_trait=BlendPixelTrait; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; 
q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; 
ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); else if (image->depth > 8) return(2); } else if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType 
NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static void ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; StringInfo *profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return; profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile,exception); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if ((p+count) > (blocks+length-16)) return; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. 
*/ p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) 
return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(image,ScaleQuantumToChar(pixel),q); else SetPixelIndex(image,ScaleQuantumToShort(pixel),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); if (channels == 1 || type == -2) SetPixelGray(image,pixel,q); break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(image,pixel,q); else SetPixelGreen(image,pixel,q); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(image,pixel,q); else SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if 
((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const size_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char 
*) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    length = largest compressed scanline; one buffer is reused for all rows.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* NOTE(review): upstream uses row_size+512 for this sanity bound — confirm. */
  if (length > row_size + 256) // arbitrary number
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* The 123456 sentinel tells DecodePSDPixels this is 1-bit (bitmap) data. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read one ZIP-compressed channel (with or without delta prediction).
  The whole compressed payload (compact_size bytes) is inflated into a
  full-plane buffer before row-by-row decoding.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      /* NOTE(review): pixels appears to be leaked on this error path — confirm. */
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if (layer_info->channel_info[channel].type < -1) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickTrue; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } layer_info->mask.image=mask; return(status); } static MagickBooleanType ReadPSDLayer(Image 
*image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image,CMYKColorspace,exception); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image,GRAYColorspace,exception); /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j, 
compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. 
*/ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); 
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, 
flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) 
RelinquishMagickMemory(layer_info);
    }
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  }
  return(status);
}

/*
  Public layer-reading entry point: enforces the coder security policy for
  "PSD" before delegating to ReadPSDLayersInternal.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}

/*
  Read the flattened ("merged") composite that follows the layer data.
  Only Raw and RLE compression are supported here; RLE scanline counts for
  all channels are read up front with ReadPSDRLESizes.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    /* Channels are stored planar: each channel's rows are contiguous. */
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,i,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) 
psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { SetImageColorspace(image,CMYKColorspace,exception); if (psd_info.channels > 4) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536, exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image,GRAYColorspace,exception); if (psd_info.channels > 1) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if (psd_info.channels > 3) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* Duotone image data; the format of this data is undocumented. 
*/ data=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,(size_t) length,data); data=(unsigned char *) RelinquishMagickMemory(data); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image, exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1)) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); SetImageAlphaChannel(image,TransparentAlphaChannel,exception); image->background_color.alpha=TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant of the PSD format. */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image
%  format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
% % o exception: return any errors or warnings in this structure. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned short) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=(WriteBlobMSBLong(image,(unsigned short) size)); SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobMSBLong(image,(unsigned int) size)); return(WriteBlobMSBLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBLong(image,(unsigned int) size); else result=WriteBlobMSBLongLong(image,size); SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. 
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch buffer for one literal run: 1 length byte + up to 127 literals */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to encode */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* single trailing byte: emit as a 1-byte literal run */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two trailing bytes: emit as a 2-byte literal run */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* three equal bytes: emit a packed (replicate) run of 3 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              /* PackBits limits a run to 127 repetitions */
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          /* stop 3 bytes early so the next iteration can still look ahead */
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  WriteCompressionStart(): emit the 2-byte compression tag for the next
  channel data section; for RLE, also reserve one zeroed row-count entry
  per row and channel (patched later via WritePSDOffset).  Returns the
  number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (next_image->compression == RLECompression)
    {
      length=WriteBlobMSBShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (next_image->compression == ZipCompression)
    length=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    length=WriteBlobMSBShort(image,Raw);
  return(length);
}

/*
  WritePSDChannel(): export one channel (quantum_type) of next_image into
  the blob, honoring the image's compression (Raw, RLE, or Zip when the
  zlib delegate is built in).  'size_offset' locates the RLE row-count
  table to patch; 'separate' selects per-channel (layer) vs. interleaved
  (composite) mode.  Returns the number of bytes written, 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* channel written on its own: emit its compression tag here */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  /* PSD supports only 1, 8 or 16 bit channels */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1..9 maps directly onto the zlib compression level */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* PSD bitmap mode stores 1-bit data inverted */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* back-patch this row's byte count in the RLE offset table */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  AcquireCompactPixels(): allocate a PackBits output buffer large enough
  for one worst-case scanline of 'image'.  Returns NULL (with an
  exception raised) on allocation failure; caller frees.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}

/*
  WritePSDChannels(): write every channel of next_image (index or
  gray/RGB/CMYK plus alpha, plus an optional opacity mask when
  'separate').  'size_offset' locates each channel's length field to
  patch for layers.  Returns total bytes written, 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* composite image: channels are interleaved after one shared tag */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      /* per-channel stride into the shared RLE offset table */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
        2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* undo the in-place CMYK negation performed above */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* an opacity mask registered by the reader is written as an
         extra (id -2) channel */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  WritePascalString(): write 'value' as a Pascal string (length byte
  followed by at most 255 characters), zero-padded so the total length
  is a multiple of 'padding'.  Returns bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
    255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;
  if ((length % padding) == 0)
    return(count);
  /* pad with zero bytes up to the next multiple of 'padding' */
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  WriteResolutionResourceBlock(): emit the 8BIM resolution resource
  (id 0x03ED) holding the horizontal/vertical resolution as 16.16
  fixed-point values plus their units.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert cm to inches, then to 16.16 fixed point */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/*
  WriteChannelSize(): write a layer channel record (id + zeroed length
  placeholder, patched later).  Returns bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=WriteBlobMSBSignedShort(image,channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  RemoveICCProfileFromResourceBlock(): strip the ICC profile resource
  (id 0x040F) from a cloned 8BIM resource block so the writer can emit
  the image's own ICC profile without duplication.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* close the gap left by the removed resource */
            if ((q+quantum < (datum+length-16)))
              (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  RemoveResolutionFromResourceBlock(): strip the resolution resource
  (id 0x03ED) from a cloned 8BIM block; the writer emits its own via
  WriteResolutionResourceBlock().
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
      {
        (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  GetAdditionalInformation(): filter the "psd:additional-info" profile
  according to the psd:additional-info option: "all" keeps everything,
  "selective" keeps only whitelisted keys, anything else drops the
  profile entirely.  Returns the (possibly trimmed) profile or NULL.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl",
      "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* neither "all" nor "selective": discard the profile */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(*p++);
    key[1]=(*p++);
    key[2]=(*p++);
    key[3]=(*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;

      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* not whitelisted: splice this record out in place */
        if (remaining_length > 0)
          p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}

static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    num_channels,
    packet_size,
    rounded_size,
    size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* PSD is limited to 30000x30000; larger images require PSB (version 2) */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version);  /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  if (SetImageGray(image,exception) != MagickFalse)
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ?
            5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(
          image->colormap[i].green));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* clone so the in-place resource removals below do not mutate the
         image's own profile */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      /* resources are padded to even length */
      if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* reserve the layer/mask section and layer-info length fields;
     both are patched once the layer data has been written */
  SetPSDSize(&psd_info,image,0);
  SetPSDSize(&psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count signals that the first alpha channel holds
     the merged-image transparency */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=strlen(property) == 9 ? 255 : 0;
      }
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
      next_image->columns));
    channels=1U;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobMSBShort(image,total_channels);
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(&psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(&psd_info,image,-1);  /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(&psd_info,image,-2);  /* user mask channel id */
    size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
    size+=WriteBlob(image,4,(const unsigned char *)
      CompositeOperatorToPSDBlendMode(next_image->compose));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
      1 << 0x02 : 1); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: mask record + pascal name (padded to 4) +
       any additional-info records */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobMSBLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobMSBLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobMSBLong(image,20);
        size+=WriteBlobMSBSignedLong(image,mask->page.y);
        size+=WriteBlobMSBSignedLong(image,mask->page.x);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->rows+
          mask->page.y);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->columns+
          mask->page.x);
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobMSBLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(&psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write the total size
  */
  size_offset+=WritePSDSize(&psd_info,image,size+
    (psd_info.version == 1 ? 8 : 16),size_offset);
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;  /* the layer-info section must be even */
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      /* the merged image may not be Zip compressed; fall back to RLE */
      compression=image->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
/* ===== File: GB_binop__fmod_fp32.c ===== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B                       GB (_bind1st__fmod_fp32)
// C=scalar+B'                      GB (_bind1st_tran__fmod_fp32)
// C=A+scalar                       GB (_bind2nd__fmod_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__fmod_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = fmodf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = fmodf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time controls in GB_control.h)
#define GB_DISABLE \
    (GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float
*) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__fmod_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__fmod_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const 
GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__fmod_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__fmod_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__fmod_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__fmod_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; 
p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = fmodf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__fmod_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = fmodf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmodf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__fmod_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmodf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__fmod_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Par-14-NestedFollowingNestedPar.c
/*
 * NOTE(review): this file (Par-14-NestedFollowingNestedPar.c) looks like a
 * compiler-analysis / data-race benchmark — a nested `parallel for` following
 * another nested `parallel for` — so the code shape is presumably intentional.
 * Confirm with the benchmark suite before "fixing" anything below.
 */
int main(int argc, char **argv) {

    int a[4] = {1, 2, 3, 4};
    int b[4] = {1, 1, 1, 1};
    int c[4] = {0, 2, 1, 3};

    // NOTE(review): i == 0 always satisfies i < 2, so this loop returns -1 on
    // its first iteration and everything below is dead code — TODO confirm
    // this early exit is intended by the benchmark.
    for (int i = 0; i < 1; ++i) {
        if (i < 2) {
            return -1;
        }
    }

    #pragma omp parallel for
    for (int i = 0; i < 4; ++i) {
        a[i] = 3 * a[i];

        // Nested parallel region: executes with one thread unless nested
        // parallelism is enabled (OMP_NESTED / max-active-levels).  b[j] is
        // written by every outer iteration, so concurrent outer threads race
        // on b — presumably the point of this benchmark.
        #pragma omp parallel for
        for (int j = 0; j < 4; ++j) {
            b[j] += a[i];
        }

        #pragma omp parallel for
        for (int k = 0; k < 4; ++k) {
            c[k] = a[i] * b[k] + c[k];

            // Third nesting level; reads and writes b concurrently with the
            // loop above across outer iterations.
            #pragma omp parallel for
            for (int j = 0; j < 4; ++j) {
                b[j] += a[i];
            }
        }
    }

    return 0;
}
GB_binop__bxor_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated worker kernels for the BXOR operator on INT32.
// Any defect found here must be fixed in the Generator/ templates instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxor_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bxor_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxor_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxor_int32)
// C=scalar+B                       GB (_bind1st__bxor_int32)
// C=scalar+B'                      GB (_bind1st_tran__bxor_int32)
// C=A+scalar                       GB (_bind2nd__bxor_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bxor_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = (aij) ^ (bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) ^ (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_INT32 || GxB_NO_BXOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXOR is none of these, so this worker is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (BXOR is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full)
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x) ^ (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij) ^ (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GraphBLAS.h
/* * GraphBLAS.h * * Created on: May 31, 2016 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ #define NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ #include <limits> #include "Semirings.h" #include "SparseAccumulator.h" #include "AlgebraicGlobals.h" #include "Vector.h" /** * @ingroup algebraic * Implements the GraphBLAS interface. For more information visit https://graphblas.org. */ namespace GraphBLAS { // **************************************************** // Operations // **************************************************** /** * Computes binOp(A(i,j), B(i,j)) for all i,j element-wise. Note that the dimensions of * @a A and @a B must coincide and that the zero must be the same. * @param A * @param B * @param binOp * @return The resulting matrix. */ template<class SemiRing, class Matrix, typename L> Matrix eWiseBinOp(const Matrix& A, const Matrix& B, L binOp) { assert(A.numberOfRows() == B.numberOfRows() && A.numberOfColumns() == B.numberOfColumns()); assert(A.getZero() == B.getZero() && A.getZero() == SemiRing::zero()); std::vector<int64_t> columnPointer(A.numberOfColumns(), -1); std::vector<double> Arow(A.numberOfColumns(), SemiRing::zero()); std::vector<double> Brow(A.numberOfColumns(), SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { NetworKit::index listHead = 0; NetworKit::count nnz = 0; // search for nonZeros in matrix A A.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) { Arow[j] = value; columnPointer[j] = listHead; listHead = j; nnz++; }); // search for nonZeros in matrix B B.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) { Brow[j] = value; if (columnPointer[j] == -1) { // matrix A does not have a nonZero entry in column j columnPointer[j] = listHead; listHead = j; nnz++; } }); // apply operator on the found nonZeros in A and B for (NetworKit::count k = 0; k < nnz; ++k) { double value = 
binOp(Arow[listHead], Brow[listHead]); if (value != SemiRing::zero()) { triplets.push_back({i,listHead,value}); } NetworKit::index temp = listHead; listHead = columnPointer[listHead]; // reset for next row columnPointer[temp] = -1; Arow[temp] = SemiRing::zero(); Brow[temp] = SemiRing::zero(); } nnz = 0; } return Matrix(A.numberOfRows(), A.numberOfColumns(), triplets, A.getZero()); } /** * Computes the matrix-matrix multiplication of @a A and @a B. Note that * A.numberOfColumns() must be equal to B.numberOfRows() and the zero elements * must be the same. The default Semiring is the ArithmeticSemiring. * @param A * @param B * @return The result of the multiplication A * B. */ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix MxM(const Matrix& A, const Matrix& B) { assert(A.numberOfColumns() == B.numberOfRows()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } return Matrix(A.numberOfRows(), B.numberOfColumns(), triplets, A.getZero()); } /** * Computes the matrix-matrix multiplication of @a A and @a B and adds it to @a C where * the add operation is that of the specified Semiring (i.e. C(i,j) = SemiRing::add(C(i,j), (A*B)(i,j))). * The default Semiring is the ArithmeticSemiring. 
* @param A * @param B * @param C */ template<class SemiRing = ArithmeticSemiring, class Matrix> void MxM(const Matrix& A, const Matrix& B, Matrix& C) { assert(A.numberOfColumns() == B.numberOfRows() && A.numberOfRows() == C.numberOfRows() && B.numberOfColumns() == C.numberOfColumns()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero() && C.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } Matrix temp(A.numberOfRows(), B.numberOfRows(), triplets, A.getZero()); C = eWiseBinOp<SemiRing, Matrix>(C, temp, *SemiRing::add); } /** * Computes the matrix-matrix multiplication of @a A and @a B and adds it to @a C where * the add operation is specified by the binary function @a accum (i.e. C(i,j) = accum(C(i,j), (A*B)(i,j))). * The default Semiring is the ArithmeticSemiring. 
* @param A * @param B * @param C * @param accum */ template<class SemiRing = ArithmeticSemiring, typename F, class Matrix> void MxM(const Matrix& A, const Matrix& B, Matrix& C, F accum) { assert(A.numberOfColumns() == B.numberOfRows() && A.numberOfRows() == C.numberOfRows() && B.numberOfColumns() == C.numberOfColumns()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero() && C.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } Matrix temp(A.numberOfRows(), B.numberOfRows(), triplets, A.getZero()); C = eWiseBinOp<SemiRing, Matrix>(C, temp, accum); } /** * Computes the matrix-vector product of matrix @a A and Vector @a v. The default Semiring is the ArithmeticSemiring. * @param A * @param v */ template<class SemiRing = ArithmeticSemiring, class Matrix> NetworKit::Vector MxV(const Matrix& A, const NetworKit::Vector& v) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); NetworKit::Vector result(A.numberOfRows(), A.getZero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { result[i] = SemiRing::add(result[i], SemiRing::mult(value, v[j])); }); return result; } /** * Computes the matrix-vector product of matrix @a A and Vector @a v and adds it to @a c where the add operation * is that of the specified Semiring (i.e. c[i] = SemiRing::add(c[i], (A*v)[i]). The default Semiring is the * ArithmeticSemiring. 
* @param A * @param v * @param c */ template<class SemiRing = ArithmeticSemiring, class Matrix> void MxV(const Matrix& A, const NetworKit::Vector& v, NetworKit::Vector& c) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { c[i] = SemiRing::add(c[i], SemiRing::mult(value, v[j])); }); } /** * Computes the matrix-vector product of matrix @a A and Vector @a v and adds it to @a c where the add operation * is that of the specified binary function @a accum (i.e. c[i] = accum(c[i], (A*v)[i]). The default Semiring is the * ArithmeticSemiring. * @param A * @param v * @param c */ template<class SemiRing = ArithmeticSemiring, typename F, class Matrix> void MxV(const Matrix& A, const NetworKit::Vector& v, NetworKit::Vector& c, F accum) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { c[i] = accum(c[i], SemiRing::mult(value, v[j])); }); } /** * Computes SemiRing::add(A(i,j), B(i,j)) for all i,j element-wise and returns the resulting matrix. The default * Semiring is the ArithmeticSemiring. * @param A * @param B */ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix eWiseAdd(const Matrix& A, const Matrix& B) { return eWiseBinOp<SemiRing, Matrix>(A, B, [](const double a, const double b) {return SemiRing::add(a,b);}); } /** * Computes SemiRing::mult(A(i,j), B(i,j)) for all i,j element-wise and returns the resulting matrix. The default * Semiring is the ArithmeticSemiring. 
* @param A * @param B * @return */ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix eWiseMult(const Matrix& A, const Matrix& B) { return eWiseBinOp<SemiRing, Matrix>(A, B, [](const double a, const double b) {return SemiRing::mult(a,b);}); } /** * Computes the row-reduction of the @a matrix and returns the result as a vector. That is, the elements of each row * are summed up to form the respective entry in the result vector. The add operator is that of the specified * Semiring. The default Semiring is the ArithmeticSemiring. * @param matrix */ template<class SemiRing = ArithmeticSemiring, class Matrix> NetworKit::Vector rowReduce(const Matrix& matrix) { assert(matrix.getZero() == SemiRing::zero()); NetworKit::Vector rowReduction(matrix.numberOfRows(), 0.0); #pragma omp parallel for for (NetworKit::index i = 0; i < matrix.numberOfRows(); ++i) { matrix.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) { rowReduction[i] = SemiRing::add(rowReduction[i], value); }); } return rowReduction; } /** * Computes the column-reduction of the @a matrix and returns the result as a Vector. That is, the elements of each * column are summed up to form the respective entry in the result Vector. The add operator is that of the specified * Semiring. The default Semiring is the ArithmeticSemiring. * @param matrix */ template<class SemiRing = ArithmeticSemiring, class Matrix> NetworKit::Vector columnReduce(const Matrix& matrix) { assert(matrix.getZero() == SemiRing::zero()); NetworKit::Vector columnReduction(matrix.numberOfColumns(), 0.0); matrix.forNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { columnReduction[j] = SemiRing::add(columnReduction[j], value); }); return columnReduction; } } #endif /* NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ */
Fdtd.h
#pragma once
#include "Constants.h"
#include "FieldSolver.h"
#include "Grid.h"
#include "PmlFdtd.h"
#include "Vectors.h"

#include <algorithm>

namespace pfc {

    // Classic Yee-scheme FDTD field solver with optional PML absorbing
    // boundaries, a field generator, and optional artificial anisotropy for
    // numerical-dispersion reduction (see setAnisotropy).
    class FDTD : public RealFieldSolver<YeeGridType>
    {
    public:
        FDTD(YeeGrid* grid);

        // Advances the fields by one full time step: B half-step, E full
        // step, B half-step (leapfrog).
        void updateFields();

        // Replaces the current PML with one of the given thickness (cells).
        void setPML(int sizePMLx, int sizePMLy, int sizePMLz);
        // Takes ownership of _generator (stored in a smart pointer).
        void setFieldGenerator(FieldGeneratorYee * _generator);

        void updateHalfB();
        void updateE();

    private:
        void updateHalfB3D();
        void updateHalfB2D();
        void updateHalfB1D();
        void updateE3D();
        void updateE2D();
        void updateE1D();

        // Per-axis effective permittivity factors; FP3(1, 1, 1) == isotropic.
        FP3 anisotropyCoeff;
        void setAnisotropy(const FP frequency, int axis);
    };

    inline FDTD::FDTD(YeeGrid* grid) : RealFieldSolver(grid)
    {
        updateDims();
        // Default: zero-size PML (effectively disabled).
        pml.reset(new Pml<GridTypes::YeeGridType>(this, Int3(0, 0, 0)));//pml.reset(new PmlFdtd(this));;
        generator.reset(new ReflectFieldGeneratorYee(this));
        updateInternalDims();
        anisotropyCoeff = FP3(1, 1, 1);
    }

    inline void FDTD::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
    {
        pml.reset(new PmlFdtd(this, Int3(sizePMLx, sizePMLy, sizePMLz)));
        updateInternalDims();
    }

    inline void FDTD::setFieldGenerator(FieldGeneratorYee * _generator)
    {
        generator.reset(_generator);
    }

    inline void FDTD::setAnisotropy(FP frequency, int axis)
    {
        // We introduce artificial anisotropy, through one axis.
        // For this we upgrade Maxwell equations by coefficients,
        // which computes from major signal frequency.
        // See more in Juntunen,Tsiboukis - Reduction of Numerical Dispersion in
        // FDTD Method Through Artificial Anisotropy.
        FP3 steps = grid->steps;
        FP WP = constants::pi * 2.0 * constants::c / frequency;  // signal wavelength
        FP R = WP / steps.norm();  // resolution: cells per wavelength
        const FP q = 0.99;  // q - stability coefficient, 0 <= q <= 1
        FP Amax = constants::pi / (3 * R * asin(asin(constants::pi / (R * sqrt(3.0))) / sqrt(3.0)));
        FP Q = Amax - 1;
        FP c1 = 1 - Q / 2;
        int axis0 = axis;
        int axis1 = (axis + 1) % 3;
        int axis2 = (axis + 2) % 3;
        // equivalents of the variables
        // Z1 == Zy, Zz == Zz
        // Zy,Zz - designation from article
        FP Z1 = steps[axis0] / steps[axis1];
        FP Z2 = steps[axis0] / steps[axis2];
        // equivalents of the variables
        // CoeffA == K1, CoeffB == K2, a1 == a, a2 == b
        // K1, K2, a, b - designation from article
        FP CoeffA = constants::pi / (R * sqrt(1 + 1 / (Z1 * Z1) + 1 / (Z2 * Z2)));
        FP a1 = sin(CoeffA / c1) * sin(CoeffA / c1) / (Z1 * Z1 * sin(CoeffA / (c1 * Z1)) * sin(CoeffA / (c1 * Z1)));
        FP a2 = sin(CoeffA / c1) * sin(CoeffA / c1) / (Z2 * Z2 * sin(CoeffA / (c1 * Z2)) * sin(CoeffA / (c1 * Z2)));
        FP CoeffB = sqrt(1 + a1 * Z1 * Z1 + a2 * Z2 * Z2);
        anisotropyCoeff[axis0] = CoeffB / (CoeffA * q * sqrt(a1 * a2)) *
            asin(q * sin(CoeffA / c1) / CoeffB);
        anisotropyCoeff[axis1] = a1 * anisotropyCoeff[axis0];
        anisotropyCoeff[axis2] = a2 * anisotropyCoeff[axis0];
    }

    inline void FDTD::updateFields()
    {
        // Leapfrog: B advances half a step before and after the E update.
        updateHalfB();
        pml->updateB();
        generator->generateB();

        updateE();
        pml->updateE();
        generator->generateE();

        updateHalfB();
    }

    // Update grid values of magnetic field in FDTD.
    // Dispatches the half-step B update to the grid's dimensionality.
    inline void FDTD::updateHalfB()
    {
        if (grid->dimensionality == 3)
            updateHalfB3D();
        else if (grid->dimensionality == 2)
            updateHalfB2D();
        else if (grid->dimensionality == 1)
            updateHalfB1D();
    }

    inline void FDTD::updateHalfB3D()
    {
        updateBAreaBegin = Int3(1, 1, 1);
        updateBAreaEnd = grid->numCells - Int3(1, 1, 1);
        for (int d = 0; d < 3; ++d)
        {
            // Exclude PML layers: they are updated separately by pml->updateB().
            internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]);
            internalBAreaEnd[d] = std::min(updateBAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        // Half time step (factor 0.5); anisotropy coefficients act as per-axis
        // effective permittivities.
        const FP cdt = constants::c * grid->dt * (FP)0.5;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);
        const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x);
        const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z);
        const FP coeffZX = cdt / (grid->steps.z * anisotropyCoeff.x);
        const FP coeffZY = cdt / (grid->steps.z * anisotropyCoeff.y);

        // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is:
        // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz -
        //     (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy),
        // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx -
        //     (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz),
        // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy -
        //     (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx),
        const Int3 begin = internalBAreaBegin;
        const Int3 end = internalBAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
#pragma simd
                for (int k = begin.z; k < end.z; k++)
                {
                    grid->Bx(i, j, k) += coeffZX * (grid->Ey(i, j, k) - grid->Ey(i, j, k - 1)) -
                        coeffYX * (grid->Ez(i, j, k) - grid->Ez(i, j - 1, k));
                    grid->By(i, j, k) += coeffXY * (grid->Ez(i, j, k) - grid->Ez(i - 1, j, k)) -
                        coeffZY * (grid->Ex(i, j, k) - grid->Ex(i, j, k - 1));
                    grid->Bz(i, j, k) += coeffYZ * (grid->Ex(i, j, k) - grid->Ex(i, j - 1, k)) -
                        coeffXZ * (grid->Ey(i, j, k) - grid->Ey(i - 1, j, k));
                }
            }
    }

    inline void FDTD::updateHalfB2D()
    {
        updateBAreaBegin = Int3(1, 1, 0);
        updateBAreaEnd = grid->numCells - Int3(1, 1, 0);
        for (int d = 0; d < 2; ++d)
        {
            internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]);
            internalBAreaEnd[d] = std::min(updateBAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        const FP cdt = constants::c * grid->dt * (FP)0.5;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);
        const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x);
        const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z);

        // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is:
        // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz -
        //     (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy),
        // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx -
        //     (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz),
        // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy -
        //     (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx),
        // 2D: all d/dz terms vanish (k fixed at 0).
        const Int3 begin = internalBAreaBegin;
        const Int3 end = internalBAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        {
#pragma simd
            for (int j = begin.y; j < end.y; j++)
            {
                grid->Bx(i, j, 0) += -coeffYX * (grid->Ez(i, j, 0) - grid->Ez(i, j - 1, 0));
                grid->By(i, j, 0) += coeffXY * (grid->Ez(i, j, 0) - grid->Ez(i - 1, j, 0));
                grid->Bz(i, j, 0) += coeffYZ * (grid->Ex(i, j, 0) - grid->Ex(i, j - 1, 0)) -
                    coeffXZ * (grid->Ey(i, j, 0) - grid->Ey(i - 1, j, 0));
            }
        }
    }

    inline void FDTD::updateHalfB1D()
    {
        updateBAreaBegin = Int3(1, 0, 0);
        updateBAreaEnd = grid->numCells - Int3(1, 0, 0);
        for (int d = 0; d < 1; ++d)
        {
            internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]);
            internalBAreaEnd[d] = std::min(updateBAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        const FP cdt = constants::c * grid->dt * (FP)0.5;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);

        // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is:
        // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz -
        //     (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy),
        // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx -
        //     (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz),
        // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy -
        //     (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx),
        // 1D: only d/dx terms survive; Bx has no x-derivative source and stays put.
        const Int3 begin = internalBAreaBegin;
        const Int3 end = internalBAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        {
            grid->By(i, 0, 0) += coeffXY * (grid->Ez(i, 0, 0) - grid->Ez(i - 1, 0, 0));
            grid->Bz(i, 0, 0) += -coeffXZ * (grid->Ey(i, 0, 0) - grid->Ey(i - 1, 0, 0));
        }
    }

    // Update grid values of electric field in FDTD.
    inline void FDTD::updateE()
    {
        if (grid->dimensionality == 3)
            updateE3D();
        else if (grid->dimensionality == 2)
            updateE2D();
        else if (grid->dimensionality == 1)
            updateE1D();
    }

    inline void FDTD::updateE3D()
    {
        updateEAreaBegin = Int3(0, 0, 0);
        updateEAreaEnd = grid->numCells - Int3(1, 1, 1);
        for (int d = 0; d < 3; ++d)
        {
            internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]);
            internalEAreaEnd[d] = std::min(updateEAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        const FP coeffCurrent = -(FP)4 * constants::pi * grid->dt;
        const FP cdt = constants::c * grid->dt;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);
        const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x);
        const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z);
        const FP coeffZX = cdt / (grid->steps.z * anisotropyCoeff.x);
        const FP coeffZY = cdt / (grid->steps.z * anisotropyCoeff.y);

        // In internal area use:
        // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) -
        //     b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz),
        // e.y(i, j, k) += dt * -4pi * j.y(i, j, k) + c * dt * ((b.x(i, j, k+1) -
        //     b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx),
        // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) -
        //     b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy),
        const Int3 begin = internalEAreaBegin;
        const Int3 end = internalEAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
#pragma simd
                for (int k = begin.z; k < end.z; k++)
                {
                    grid->Ex(i, j, k) += coeffCurrent * grid->Jx(i, j, k) +
                        coeffYX * (grid->Bz(i, j + 1, k) - grid->Bz(i, j, k)) -
                        coeffZX * (grid->By(i, j, k + 1) - grid->By(i, j, k));
                    grid->Ey(i, j, k) += coeffCurrent * grid->Jy(i, j, k) +
                        coeffZY * (grid->Bx(i, j, k + 1) - grid->Bx(i, j, k)) -
                        coeffXY * (grid->Bz(i + 1, j, k) - grid->Bz(i, j, k));
                    grid->Ez(i, j, k) += coeffCurrent * grid->Jz(i, j, k) +
                        coeffXZ * (grid->By(i + 1, j, k) - grid->By(i, j, k)) -
                        coeffYZ * (grid->Bx(i, j + 1, k) - grid->Bx(i, j, k));
                }
            }

        // Process edge values
        if (updateEAreaEnd.x == grid->numCells.x - 1)
        {
            int i = updateEAreaEnd.x;
#pragma omp parallel for
            for (int j = begin.y; j < end.y; j++)
                for (int k = begin.z; k < end.z; k++)
                    grid->Ex(i, j, k) += coeffCurrent * grid->Jx(i, j, k) +
                        coeffYX * (grid->Bz(i, j + 1, k) - grid->Bz(i, j, k)) -
                        coeffZX * (grid->By(i, j, k + 1) - grid->By(i, j, k));
        }
        if (updateEAreaEnd.y == grid->numCells.y - 1)
        {
            int j = updateEAreaEnd.y;
#pragma omp parallel for
            for (int i = begin.x; i < end.x; i++)
                for (int k = begin.z; k < end.z; k++)
                    grid->Ey(i, j, k) += coeffCurrent * grid->Jy(i, j, k) +
                        coeffZY * (grid->Bx(i, j, k + 1) - grid->Bx(i, j, k)) -
                        coeffXY * (grid->Bz(i + 1, j, k) - grid->Bz(i, j, k));
        }
        if (updateEAreaEnd.z == grid->numCells.z - 1)
        {
            int k = updateEAreaEnd.z;
#pragma omp parallel for
            for (int i = begin.x; i < end.x; i++)
                for (int j = begin.y; j < end.y; j++)
                    grid->Ez(i, j, k) += coeffCurrent * grid->Jz(i, j, k) +
                        coeffXZ * (grid->By(i + 1, j, k) - grid->By(i, j, k)) -
                        coeffYZ * (grid->Bx(i, j + 1, k) - grid->Bx(i, j, k));
        }
    }

    inline void FDTD::updateE2D()
    {
        updateEAreaBegin = Int3(0, 0, 0);
        updateEAreaEnd = grid->numCells - Int3(1, 1, 0);
        for (int d = 0; d < 2; ++d)
        {
            internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]);
            internalEAreaEnd[d] = std::min(updateEAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        const FP coeffCurrent = -(FP)4 * constants::pi * grid->dt;
        const FP cdt = constants::c * grid->dt;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);
        const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x);
        const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z);

        // In internal area use:
        // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) -
        //     b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz),
        // e.y(i, j, k) += dt * -4pi * j.y(i, j, k) + c * dt * ((b.x(i, j, k+1) -
        //     b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx),
        // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) -
        //     b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy),
        // 2D: d/dz terms vanish (k fixed at 0).
        const Int3 begin = internalEAreaBegin;
        const Int3 end = internalEAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        {
#pragma simd
            for (int j = begin.y; j < end.y; j++)
            {
                grid->Ex(i, j, 0) += coeffCurrent * grid->Jx(i, j, 0) +
                    coeffYX * (grid->Bz(i, j + 1, 0) - grid->Bz(i, j, 0));
                grid->Ey(i, j, 0) += coeffCurrent * grid->Jy(i, j, 0) -
                    coeffXY * (grid->Bz(i + 1, j, 0) - grid->Bz(i, j, 0));
                grid->Ez(i, j, 0) += coeffCurrent * grid->Jz(i, j, 0) +
                    coeffXZ * (grid->By(i + 1, j, 0) - grid->By(i, j, 0)) -
                    coeffYZ * (grid->Bx(i, j + 1, 0) - grid->Bx(i, j, 0));
            }
        }

        // Process edge values
        if (updateEAreaEnd.x == grid->numCells.x - 1)
        {
            int i = updateEAreaEnd.x;
#pragma omp parallel for
            for (int j = begin.y; j < end.y; j++)
                grid->Ex(i, j, 0) += coeffCurrent * grid->Jx(i, j, 0) +
                    coeffYX * (grid->Bz(i, j + 1, 0) - grid->Bz(i, j, 0));
        }
        if (updateEAreaEnd.y == grid->numCells.y - 1)
        {
            int j = updateEAreaEnd.y;
#pragma omp parallel for
            for (int i = begin.x; i < end.x; i++)
                grid->Ey(i, j, 0) += coeffCurrent * grid->Jy(i, j, 0) -
                    coeffXY * (grid->Bz(i + 1, j, 0) - grid->Bz(i, j, 0));
        }
    }

    inline void FDTD::updateE1D()
    {
        updateEAreaBegin = Int3(0, 0, 0);
        updateEAreaEnd = grid->numCells - Int3(1, 0, 0);
        for (int d = 0; d < 1; ++d)
        {
            internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]);
            internalEAreaEnd[d] = std::min(updateEAreaEnd[d],
                grid->numCells[d] - pml->rightDims[d]);
        }

        const FP coeffCurrent = -(FP)4 * constants::pi * grid->dt;
        const FP cdt = constants::c * grid->dt;
        const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y);
        const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z);

        // In internal area use:
        // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) -
        //     b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz),
        // e.y(i, j, k) += dt * -4pi * j.y(i, j, k) + c * dt * ((b.x(i, j, k+1) -
        //     b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx),
        // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) -
        //     b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy),
        // 1D: only d/dx terms survive; Ex receives only the current contribution.
        const Int3 begin = internalEAreaBegin;
        const Int3 end = internalEAreaEnd;
#pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        {
            grid->Ex(i, 0, 0) += coeffCurrent * grid->Jx(i, 0, 0);
            grid->Ey(i, 0, 0) += coeffCurrent * grid->Jy(i, 0, 0) -
                coeffXY * (grid->Bz(i + 1, 0, 0) - grid->Bz(i, 0, 0));
            grid->Ez(i, 0, 0) += coeffCurrent * grid->Jz(i, 0, 0) +
                coeffXZ * (grid->By(i + 1, 0, 0) - grid->By(i, 0, 0));
        }
    }
section.c
#include <stdio.h>
#include <omp.h>

/* Worker run by the first "omp section"; reports the executing thread id. */
void funcA(void)
{
    printf("En funcA: esta sección ejecuta el thread %d\n", omp_get_thread_num());
}

/* Worker run by the second "omp section".
 * NOTE(review): the message says "funB" — presumably a typo for "funcB";
 * left unchanged pending confirmation. */
void funcB(void)
{
    printf("En funB: esta sección la ejecuta el thread %d\n", omp_get_thread_num());
}

/*
 * Demonstrates the OpenMP "sections" worksharing construct: inside a parallel
 * region, each "section" is executed exactly once by some thread of the team,
 * so funcA and funcB may run concurrently on different threads.
 */
int main(void)
{
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            funcA();
            #pragma omp section
            funcB();
        }
    }
    return 0;
}
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> namespace mxnet { namespace common { template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /* * \brief setup default-storage tblobs from source NDArrays. If any source NDArray has non-default * storage, it creates a temp NDArray with default storage and uses the temp tblob. The * function also records the indices of non-default source NDArrays and the indices of * their corresponding temporary NDArrays in the temp array. 
 * \param src list of source NDArray
 * \param blobs list of tblobs to return
 * \param temp_src list of source NDArrays which requires temporary default storage representation
 * \param temp_dst list of temporary destination NDArrays for default storage representation
 * \param idx_map mapping from indices in source NDArrays to indices in temp_dst. When not set,
          indices are not recorded
 * \return true if any source NDArray need to cast storage
 */
inline bool SetupDefaultBlobs(const std::vector<NDArray>& src,
                              std::vector<TBlob> *blobs,
                              std::vector<NDArray> *temp_src,
                              std::vector<NDArray> *temp_dst,
                              std::unordered_map<uint32_t, uint32_t> *idx_map = nullptr) {
  // NOTE(review): std::unordered_map is used here but <unordered_map> is not in
  // this header's visible include list — presumably pulled in transitively;
  // confirm and include it directly.
  bool require_cast = false;
  for (size_t i = 0; i < src.size(); i++) {
    auto& nd = src[i];
    if (nd.storage_type() != kDefaultStorage) {
      if (idx_map != nullptr) {
        // Record where this source index lands in temp_dst.
        (*idx_map)[i] = temp_dst->size();
      }
      // Allocate a default-storage shadow array with matching shape/ctx/dtype.
      NDArray temp(nd.shape(), nd.ctx(), false, nd.dtype());
      temp_src->emplace_back(nd);
      temp_dst->emplace_back(temp);
      blobs->emplace_back(temp.data());
      require_cast = true;
    } else {
      blobs->push_back(nd.data());
    }
  }
  return require_cast;
}

/*
 * \brief cast the NDArrays in `src` and store the result in NDArrays in `dst`.
 * This is only used for storage fallback in executor.
 * When storage_fallback is false, and `MXNET_EXEC_STORAGE_FALLBACK` == 0,
 * storage fallback is disallowed.
 * \param src list of source NDArray to cast
 * \param dst list of destination NDArray which hold the result of cast_storage operation
 * \param ctx operator context for cast_storage operation
 * \param storage_fallback whether storage_fallback is allowed. When set to false,
 *        its value depends on `MXNET_EXEC_STORAGE_FALLBACK`.
 */
template <typename xpu>
inline void CastNonDefaultStorage(const std::vector<NDArray>& src,
                                  const std::vector<NDArray>& dst,
                                  const OpContext& ctx,
                                  bool storage_fallback = false) {
  CHECK_GE(dst.size(), src.size());
  if (src.size() == 0) return;
  if (storage_fallback == false) {
    // Caller did not force fallback: defer to the environment switch.
    storage_fallback = dmlc::GetEnv("MXNET_EXEC_STORAGE_FALLBACK", true);
  }
  if (storage_fallback == false) {
    LOG(FATAL) << "Storage type conversion detected during execution. "
               << "You are probably executing an operator which "
               << "doesn't support NDArray inputs with non-default storage.";
  }
  for (size_t i = 0; i < src.size(); i++) {
    CastStorageDispatch<xpu>(ctx, src[i], dst[i]);
  }
}

// Check if any storage type is not default storage
inline bool ContainsNonDefaultStorage(const StorageTypeVector& vstorage) {
  for (const auto& i : vstorage) {
    if (i != kUndefinedStorage && i != kDefaultStorage) return true;
  }
  return false;
}

// Check if any NDArray in the list has default storage
inline bool ContainsDefaultStorage(const std::vector<NDArray>& ndarrays) {
  for (const auto &nd : ndarrays) {
    if (nd.storage_type() == kDefaultStorage) {
      return true;
    }
  }
  return false;
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadPerGPU());
}

// Sum the n elements of array a into start using an OpenMP reduction.
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize,
                        const Compare& comp) {
  if (len < grainsize) {
    // Small enough: sort sequentially.
    std::sort(first, first+len, comp);
  } else {
    // Sort the two halves concurrently (one in a new thread), then merge.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // At least 16K elements per task so thread overhead stays negligible.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // Value-initialize (zero) the n elements.
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructs an arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

// Look up the registered FCompute attribute of op for the device of ctx;
// returns nullptr if the op has no such attribute registered.
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
GB_unop__isnan_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__isnan_bool_fp64
// op(A') function:  GB_unop_tran__isnan_bool_fp64

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isnan (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isnan (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (aij) ;              \
    Cx [pC] = isnan (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Writes Cx [p] = isnan (Ax [p]) for all anz entries, in parallel over
// nthreads threads.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__isnan_bool_fp64
(
    bool *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isnan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__isnan_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Shared transpose kernel; the GB_* macros above plug in the isnan op.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__abs_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_uint16_uint16)
// op(A') function:  GB (_unop_tran__abs_uint16_uint16)

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = aij
// unaryop:  cij = aij

// NOTE: abs of an unsigned type is the identity, so the op reduces to a copy.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = aij ;              \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__abs_uint16_uint16)
(
    uint16_t *Cx,           // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__abs_uint16_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Shared transpose kernel; the GB_* macros above plug in the identity op.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/

/* One row of a color-transformation lookup table: the contribution of a
   single quantized input channel to each of the three output channels. */
typedef struct _TransformPacket
{
  MagickRealType
    x,   /* contribution to first output channel */
    y,   /* contribution to second output channel */
    z;   /* contribution to third output channel */
} TransformPacket;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the RGBTransformImage method is:
%
%      MagickBooleanType RGBTransformImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* Convert an RGB triple to CMY: each output channel is the normalized
   [0..1] complement of the corresponding RGB channel. */
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
  const Quantum blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* Convert RGB to CIE L*a*b* by way of the XYZ intermediate space. */
static void ConvertRGBToLab(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* Map XYZ tristimulus values to LMS cone responses.  NOTE(review): the
   coefficients appear to match the CAT02 chromatic-adaptation matrix --
   confirm against the reference before relying on that identification. */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* Convert RGB to LMS by way of the XYZ intermediate space. */
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* Convert RGB to CIE L*u*v* by way of the XYZ intermediate space. */
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

static void ConvertRGBToYPbPr(const Quantum red,const Quantum
green, const Quantum blue,double *Y,double *Pb,double *Pr)
{
  /* Luma uses the same 0.298839/0.586811/0.114350 weights as the Rec601
     tables below; Pb/Pr are offset by 0.5 so the nominal [-0.5..0.5]
     chroma range maps to [0..1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

/* YCbCr here is computed with the identical analog YPbPr weights (no
   digital head/foot-room scaling is applied). */
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

/* Convert RGB to YUV; U and V are offset by 0.5 into [0..1]. */
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

/* Convert RGB to YDbDr (SECAM); Db and Dr are offset by 0.5 into [0..1]. */
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* Convert RGB to YIQ (NTSC); I and Q are offset by 0.5 into [0..1]. */
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

MagickExport MagickBooleanType RGBTransformImage(Image *image,
  const ColorspaceType colorspace)
{
#define RGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* Constant offsets added after the table lookups (e.g. the 0.5 chroma
     bias for YCbCr-style spaces). */
  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  /* Per-channel lookup tables: output = x_map[r]+y_map[g]+z_map[b]. */
  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Callers must route sRGB/transparent/undefined targets elsewhere. */
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { 
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: break; } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) 
GetPixelBlue(q)); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); 
z_map[i].y=(MagickRealType) (0.114350*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.21260*R+0.71520*G+0.07220*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.21260*(double) i); y_map[i].x=(MagickRealType) (0.71520*(double) i); z_map[i].x=(MagickRealType) (0.07220*(double) i); x_map[i].y=(MagickRealType) (0.21260*(double) i); y_map[i].y=(MagickRealType) (0.71520*(double) i); z_map[i].y=(MagickRealType) (0.07220*(double) i); x_map[i].z=(MagickRealType) (0.21260*(double) i); y_map[i].z=(MagickRealType) (0.71520*(double) i); z_map[i].z=(MagickRealType) (0.07220*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 
0.212600*R+0.715200*G+0.072200*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212600*(double) i); y_map[i].x=(MagickRealType) (0.715200*(double) i); z_map[i].x=(MagickRealType) (0.072200*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617*i; y_map[i].x=0.007778268551236748*i; z_map[i].x=0.001510600706713781*i; x_map[i].y=(-0.002426619775463276)*i; y_map[i].y=(-0.004763965913702149)*i; z_map[i].y=0.007190585689165425*i; x_map[i].z=0.006927257754597858*i; y_map[i].z=(-0.005800713697502058)*i; z_map[i].z=(-0.0011265440570958)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099*i-0.099); y_map[i].x=0.4321260306242638*(1.099*i-0.099); z_map[i].x=0.08392226148409894*(1.099*i-0.099); x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099); y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099); z_map[i].y=0.3994769827314126*(1.099*i-0.099); x_map[i].z=0.3848476530332144*(1.099*i-0.099); y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099); z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /* No work if the image is already tagged with this colorspace. */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  /* Reset rendering metadata; it is re-derived below for sRGB-like and
     gray targets. */
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /* Gray images get the sRGB-style gamma unless an explicit Rec601/
         Rec709 luminance (or undefined) intensity method is in effect. */
      if ((image->intensity != Rec601LuminancePixelIntensityMethod) &&
          (image->intensity != Rec709LuminancePixelIntensityMethod) &&
          (image->intensity != UndefinedPixelIntensityMethod))
        image->gamma=1.000/2.200;
      image->type=GrayscaleType;
    }
  else
    if (IssRGBColorspace(colorspace) != MagickFalse)
      image->gamma=1.000/2.200;
  /* A 1/2.2 gamma marks the image as perceptual sRGB: install the sRGB
     primaries and D65 white point (order matters -- gamma was set above). */
  if (image->gamma == (1.000/2.200))
    {
      image->rendering_intent=PerceptualIntent;
      image->gamma=1.000/2.200;
      image->chromaticity.red_primary.x=0.6400;
      image->chromaticity.red_primary.y=0.3300;
      image->chromaticity.red_primary.z=0.0300;
      image->chromaticity.green_primary.x=0.3000;
      image->chromaticity.green_primary.y=0.6000;
      image->chromaticity.green_primary.z=0.1000;
      image->chromaticity.blue_primary.x=0.1500;
      image->chromaticity.blue_primary.y=0.0600;
      image->chromaticity.blue_primary.z=0.7900;
      image->chromaticity.white_point.x=0.3127;
      image->chromaticity.white_point.y=0.3290;
      image->chromaticity.white_point.z=0.3583;
    }
  if (IsGrayColorspace(colorspace) != MagickFalse)
    image->type=GrayscaleType;
  /* Propagate the colorspace change to the pixel cache. */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Undefined target: just retag the image, no pixel transform. */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* same colorspace: no op */
  /*
    Convert the reference image from an alternate colorspace to sRGB.
    Any embedded color profiles no longer describe the pixels, so drop them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,colorspace));
  status=MagickTrue;
  /* Non-sRGB source: first bring the pixels back to sRGB... */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformRGBImage(image,image->colorspace);
  if (status == MagickFalse)
    return(status);
  /*
    ...then convert the reference image from sRGB to the alternate
    colorspace.
  */
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     T r a n s f o r m R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/
/* Convert a normalized [0..1] CMY triple back to quantum RGB. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/* Map LMS cone responses back to XYZ tristimulus values; this is the
   numeric inverse of the ConvertXYZToLMS() matrix above. */
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* Convert LMS to RGB by way of the XYZ intermediate space. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Convert normalized L*u*v* back to RGB: undo the forward pass's
   normalization (L in [0..100], u in [-134..220], v in [-140..122]),
   then go through XYZ. */
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Round a real value to the nearest YCCMap index, clamped to [0..1388]. */
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/* Convert a CMYK pixel (black stored in pixel->index) to RGB in place. */
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  pixel->red=((QuantumRange-(QuantumScale*pixel->red*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->green=((QuantumRange-(QuantumScale*pixel->green*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->blue=((QuantumRange-(QuantumScale*pixel->blue*
    (QuantumRange-pixel->index)+pixel->index)));
}

/* Convert normalized L*a*b* back to RGB: undo the forward normalization
   (L in [0..100], a/b recentered from 0.5), then go through XYZ. */
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /* Inverse of ConvertRGBToYPbPr(): chroma is recentered from the 0.5
     bias; the near-unity/near-zero coefficients are the exact numeric
     inverse of the forward matrix, kept to full precision so round trips
     are lossless. */
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/* YCbCr decoding reuses the analog YPbPr inverse (mirrors the forward
   ConvertRGBToYCbCr() wrapper). */
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/* Inverse of ConvertRGBToYDbDr(); Db/Dr are recentered from the 0.5 bias. */
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/* Inverse of ConvertRGBToYIQ(); I/Q are recentered from the 0.5 bias. */
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

/* Inverse of ConvertRGBToYUV(); U/V are recentered from the 0.5 bias. */
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)-
    0.5805003156565656797*(V-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)-
    4.813762626262513e-04*(V-0.5)));
}

MagickExport MagickBooleanType TransformRGBImage(Image *image,
  const ColorspaceType colorspace)
{
#define TransformRGBImageTag  "Transform/Image"

  /* Precomputed PhotoYCC decoding curve, indexed by RoundToYCC() above. */
  static const float
    YCCMap[1389]
= { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 
0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 
0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 
0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 
0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 
0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 
0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (image->colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { 
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: break; } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; 
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))]); green=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))]); blue=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))]); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* 
Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
GB_unop__identity_int64_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int64_fp32)
// op(A') function:  GB (_unop_tran__identity_int64_fp32)

// C type:   int64_t
// A type:   float
// cast:     int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the work is done entirely by the typecast)
#define GB_OP(z, x) \
    z = x ;

// casting: float -> double -> int64_t (GB_cast_to_int64_t also handles
// NaN and out-of-range values; see its definition in GB.h)
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to the anz entries of Ax, writing the
// typecast result into Cx.  Returns GrB_SUCCESS, or GrB_NO_VALUE if this
// kernel is compiled out (GB_DISABLE), in which case the caller falls back
// to the generic kernel.
GrB_Info GB (_unop_apply__identity_int64_fp32)
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads            // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry p in [0,anz) is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself is shared template code, textually included from
// GB_unop_transpose.c; it uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Vec.h
#ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University

Vec.h
Class for a constant-length vector

Supports the following operations:
	vec v1;			// Initialized to (0, 0, 0)
	vec v2(1.23f);		// Initialized to (1.23f, 1.23f, 1.23f)
	vec v3(1, 2, 3);	// Initialized to (1, 2, 3)
	vec v4(v3);		// Copy constructor

	std::string s=v1.str(); // Returns string of form "x y z"

	float farray[3];
	vec v5 = vec(farray);	// Explicit: "v5 = farray" won't work

	Vec<3,double> vd;	// The "vec" used above is Vec<3,float>
	point p1, p2, p3;	// Same as vec

	v3 = v1 + v2;		// Also -, *, / (all componentwise)
	v3 = 3.5f * v1;		// Also vec * scalar, vec / scalar
				// NOTE: scalar has to be the same type:
				// it won't work to do double * vec<float>
	v1 = min(v2, v3);	// Componentwise min/max
	v1 = sin(v2);		// Componentwise - all the usual functions...
	swap(v1, v2);		// In-place swap

	v3 = v1 DOT v2;		// Actually operator^
	v3 = v1 CROSS v2;	// Actually operator%

	float f = v1[0];	// Subscript
	float *fp = v1;		// Implicit conversion to float *

	f = len(v1);		// Length (also len2 == squared length)
	f = dist(p1, p2);	// Distance (also dist2 == squared distance)
	normalize(v1);		// Normalize (i.e., make it unit length)
				// normalize(vec(0,0,0)) => vec(1,0,0)
	v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)

	cout << v1 << endl;	// iostream output in the form (1,2,3)
	cin >> v2;		// iostream input using the same syntax

Also defines the utility functions sqr, cube, sgn, fract, clamp, mix, step,
smoothstep, faceforward, reflect, refract, and angle
*/

// Windows defines min and max as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns.
// Also define NOMINMAX, which prevents future bad definitions.
#ifdef min
# undef min
#endif
#ifdef max
# undef max
#endif
#ifndef NOMINMAX
# define NOMINMAX
#endif
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif

#include <cstddef>
#include <cmath>
#include <iterator>
#include <stdexcept>
#include <iostream>
#include <algorithm>
#include <sstream>

// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
#  define likely(x) (x)
#  define unlikely(x) (x)
# else
#  define likely(x) (__builtin_expect((x), 1))
#  define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif

namespace trimesh {

using ::std::size_t;

// Boost-like compile-time assertion checking: instantiating
// VEC_STATIC_ASSERTION_FAILURE<false> is a compile error, so
// VEC_STATIC_CHECK(expr) fails to compile unless expr is true.
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
	{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()

// Vec class declaration: a fixed-length, stack-allocated vector of D
// components of type T, with std::array-like accessors and componentwise math.
template <size_t D, class T = float>
class Vec {
public:
	// Types, mirroring the std container typedef conventions
	typedef T value_type;
	typedef value_type *pointer;
	typedef const value_type *const_pointer;
	typedef value_type &reference;
	typedef const value_type &const_reference;
	typedef value_type *iterator;
	typedef const value_type *const_iterator;
	typedef ::std::reverse_iterator<iterator> reverse_iterator;
	typedef ::std::reverse_iterator<const_iterator> const_reverse_iterator;
	typedef ::std::size_t size_type;
	typedef ::std::ptrdiff_t difference_type;

protected:
	// The internal representation: standard array
	T v[D];

public:
	// Constructor for no arguments.  Everything initialized to 0.
	Vec() { for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Uninitialized constructor - meant mostly for internal use
	// (callers pass VEC_UNINITIALIZED to skip the zero-fill).
#define VEC_UNINITIALIZED ((void *) 0)
	Vec(void *) {}

	// Constructor for one argument - default value.  Explicit.
	explicit Vec(const T &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; }

	// Constructors for 2-4 arguments; the static check rejects a
	// mismatch between argument count and dimension at compile time.
	Vec(const T &x, const T &y)
		{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
	Vec(const T &x, const T &y, const T &z)
		{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
	Vec(const T &x, const T &y, const T &z, const T &w)
		{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }

	// Constructor from anything that can be accessed using []
	// Pretty aggressive, so marked as explicit.
	template <class S> explicit Vec(const S &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x[i]; }

	// Using default copy constructor, assignment operator, and destructor

	// Array reference - no bounds checking
	reference operator [] (size_type i)
		{ return v[i]; }
	reference operator [] (int i)
		{ return v[i]; }
	const_reference operator [] (size_type i) const
		{ return v[i]; }
	const_reference operator [] (int i) const
		{ return v[i]; }

	// Array reference with bounds checking
	reference at(size_type i)
		{ if (i >= D) throw ::std::out_of_range("Vec::at"); return v[i]; }
	const_reference at(size_type i) const
		{ if (i >= D) throw ::std::out_of_range("Vec::at"); return v[i]; }

	// Other accessors, for compatibility with std::array
	reference front() { return v[0]; }
	const_reference front() const { return v[0]; }
	reference back() { return v[D-1]; }
	const_reference back() const { return v[D-1]; }

	// Conversion to pointer - this is what makes "float *fp = v1;" work
	operator T * () { return v; }
	operator const T * () { return v; }
	operator const T * () const { return v; }
	pointer data() { return v; }
	const_pointer data() const { return v; }

	// Iterators (plain pointers into v)
	iterator begin() { return v; }
	const_iterator begin() const { return v; }
	const_iterator cbegin() const { return v; }
	iterator end() { return begin() + D; }
	const_iterator end() const { return begin() + D; }
	const_iterator cend() const { return begin() + D; }
	reverse_iterator rbegin() { return reverse_iterator(end()); }
	const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
	const_reverse_iterator crbegin() const { return const_reverse_iterator(end()); }
	reverse_iterator rend() { return reverse_iterator(begin()); }
	const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
	const_reverse_iterator crend() const { return const_reverse_iterator(begin()); }

	// Capacity (fixed at compile time)
	size_type size() const { return D; }
	size_type max_size() const { return D; }

	// String output: components separated by single spaces ("x y z")
	const std::string str() {
		std::stringstream str_out;
		for (size_type i = 0; i < D; i++){
			if( i>0 ){ str_out << ' '; }
			str_out << v[i];
		}
		return str_out.str();
	}

	// not equal with threshold: true iff any component differs by more
	// than thresh.  NOTE(review): uses fabs, so intended for
	// floating-point component types - confirm before using on int Vecs.
	bool neq( const Vec<D,T> &x, float thresh ) {
		for (size_type i = 0; i < D; i++){
			if( fabs(x[i]-v[i])>thresh ){ return true; }
		}
		return false;
	}

	// equal with threshold: true iff every component is within thresh
	bool eq( const Vec<D,T> &x, float thresh ) {
		for (size_type i = 0; i < D; i++){
			if( fabs(x[i]-v[i])>thresh ){ return false; }
		}
		return true;
	}

	// empty() and clear() - check for all zero or set to zero
	bool empty() const
		{ for (size_type i = 0; i < D; i++) if (v[i]) return false; return true; }
	void clear()
		{ for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Set all elements to some constant
	void fill(const value_type &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; }
	Vec<D,T> &operator = (const value_type &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; return *this; }

	// Member operators.  The "#pragma omp atomic" makes each per-element
	// update atomic when compiled with OpenMP (the pragma is ignored
	// otherwise); note the whole vector update is NOT atomic as a unit.
	Vec<D,T> &operator += (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] += x[i];
		return *this;
	}
	Vec<D,T> &operator -= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] -= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x;
		return *this;
	}
	Vec<D,T> &operator /= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x[i];
		return *this;
	}
	Vec<D,T> &operator /= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x;
		return *this;
	}

	// Set each component to min/max of this and the other vector.
	// The unnamed "#pragma omp critical" serializes the whole loop
	// across threads when compiled with OpenMP.
	Vec<D,T> &min(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] < v[i]) v[i] = x[i];
		return *this;
	}
	Vec<D,T> &max(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] > v[i]) v[i] = x[i];
		return *this;
	}

	// Swap with another vector.  (Also exists as a global function.)
	void swap(Vec<D,T> &x)
	{
		using namespace ::std;
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			swap(v[i], x[i]);
	}

	// Outside of class: + - * / % ^ << >> == != < > <= >=

	// Dot product with another vector (also exists as an operator)
	value_type dot(const Vec<D,T> &x) const
	{
		value_type total = v[0] * x[0];
		for (size_type i = 1; i < D; i++)
			total += v[i] * x[i];
		return total;
	}

	// Cross product with another vector (also exists as an operator);
	// compile-time restricted to 3 dimensions
	Vec<3,T> cross(const Vec<3,T> &x) const
	{
		VEC_STATIC_CHECK(D == 3);
		return Vec<3,T>(v[1]*x[2] - v[2]*x[1],
		                v[2]*x[0] - v[0]*x[2],
		                v[0]*x[1] - v[1]*x[0]);
	}

	// Some partial compatibility with std::valarray, plus generalizations
	value_type sum() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total += v[i];
		return total;
	}
	value_type sumabs() const
	{
		using namespace ::std;
		value_type total = fabs(v[0]);
		for (size_type i = 1; i < D; i++)
			total += fabs(v[i]);
		return total;
	}
	value_type avg() const { return sum() / D; }
	value_type mean() const { return sum() / D; }
	value_type product() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total *= v[i];
		return total;
	}
	value_type min() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] < m) m = v[i];
		return m;
	}
	value_type max() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] > m) m = v[i];
		return m;
	}
	// Componentwise application of a function (two overloads, for
	// by-value and by-const-ref function signatures)
	Vec<D,T> apply(value_type func(value_type)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	Vec<D,T> apply(value_type func(const value_type&)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	// Circular (rotating) shift by n positions.
	// NOTE(review): "n % D" mixes signed n with unsigned D, so for
	// negative n the remainder is computed after conversion to size_t -
	// verify this yields the intended rotation for non-power-of-two D.
	Vec<D,T> cshift(int n) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		if (n < 0)
			n = (n % D) + D;
		for (size_type i = 0; i < D; i++)
			result[i] = v[(i+n)%D];
		return result;
	}
	// Non-circular shift: vacated positions are zero-filled
	Vec<D,T> shift(int n) const
	{
		using namespace ::std;
		if (abs(n) >= D)
			return Vec<D,T>();
		Vec<D,T> result; // Must be initialized to zero
		size_type start = n < 0 ? -n : 0;
		size_type stop = n > 0 ? D - n : D;
		for (size_type i = start; i < stop; i++)
			result[i] = v[i+n];
		return result;
	}

	// TODO for C++11: std::get()
}; // class Vec

// Shorthands for particular flavors of Vecs
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;

// Nonmember operators that take two Vecs (all componentwise)
template <size_t D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] + v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] - v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] * v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] / v2[i];
	return result;
}

// Dot product (spelled "v1 DOT v2" via the macro below)
template <size_t D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	T sum = v1[0] * v2[0];
	for (size_t i = 1; i < D; i++)
		sum += v1[i] * v2[i];
	return sum;
}
#define DOT ^

// Cross product - only in 3 dimensions (spelled "v1 CROSS v2")
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
	                v1[2]*v2[0] - v1[0]*v2[2],
	                v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %

// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <size_t D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return false;
	return true;
}

template <size_t D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return true;
	return false;
}

// Comparison by lexicographical ordering - not necessarily useful on its own,
// but necessary in order to put Vecs in sets, maps, etc.
template <size_t D, class T> static inline bool operator < (const Vec<D,T> &v1, const Vec<D,T> &v2) { using namespace ::std; for (size_t i = 0; i < D; i++) { if (v1[i] < v2[i]) return true; else if (v1[i] > v2[i]) return false; } return false; } template <size_t D, class T> static inline bool operator > (const Vec<D,T> &v1, const Vec<D,T> &v2) { return v2 < v1; } template <size_t D, class T> static inline bool operator <= (const Vec<D,T> &v1, const Vec<D,T> &v2) { return !(v2 < v1); } template <size_t D, class T> static inline bool operator >= (const Vec<D,T> &v1, const Vec<D,T> &v2) { return !(v1 < v2); } // Unary operators template <size_t D, class T> static inline const Vec<D,T> &operator + (const Vec<D,T> &v) { return v; } template <size_t D, class T> static inline const Vec<D,T> operator - (const Vec<D,T> &v) { using namespace ::std; Vec<D,T> result(VEC_UNINITIALIZED); for (size_t i = 0; i < D; i++) result[i] = -v[i]; return result; } template <size_t D, class T> static inline bool operator ! 
(const Vec<D,T> &v) { return v.empty(); } // Vec/scalar operators template <size_t D, class T> static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v) { using namespace ::std; Vec<D,T> result(VEC_UNINITIALIZED); for (size_t i = 0; i < D; i++) result[i] = x * v[i]; return result; } template <size_t D, class T> static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x) { using namespace ::std; Vec<D,T> result(VEC_UNINITIALIZED); for (size_t i = 0; i < D; i++) result[i] = v[i] * x; return result; } template <size_t D, class T> static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v) { using namespace ::std; Vec<D,T> result(VEC_UNINITIALIZED); for (size_t i = 0; i < D; i++) result[i] = x / v[i]; return result; } template <size_t D, class T> static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x) { using namespace ::std; Vec<D,T> result(VEC_UNINITIALIZED); for (size_t i = 0; i < D; i++) result[i] = v[i] / x; return result; } // iostream operators template <size_t D, class T> static inline ::std::ostream &operator << (::std::ostream &os, const Vec<D,T> &v) { using namespace ::std; os << "("; for (size_t i = 0; i < D-1; i++) os << v[i] << ", "; return os << v[D-1] << ")"; } template <size_t D, class T> static inline ::std::istream &operator >> (::std::istream &is, Vec<D,T> &v) { using namespace ::std; char c1 = 0, c2 = 0; is >> c1; if (c1 == '(' || c1 == '[') { is >> v[0] >> ws >> c2; for (size_t i = 1; i < D; i++) { if (c2 == ',') is >> v[i] >> ws >> c2; else is.setstate(ios::failbit); } } if (c1 == '(' && c2 != ')') is.setstate(ios::failbit); else if (c1 == '[' && c2 != ']') is.setstate(ios::failbit); return is; } // Utility functions for square and cube, to go along with sqrt and cbrt template <class T> static inline T sqr(const T &x) { return x*x; } template <class T> static inline T cube(const T &x) { return x*x*x; } // Sign of a scalar. Note that sgn(0) == 1. 
template <class T> static inline T sgn(const T &x) { return (x < T(0)) ? T(-1) : T(1); } // Utility functions based on GLSL template <class T> static inline T fract(const T &x) { return x - floor(x); } template <class T> static inline T clamp(const T &x, const T &a, const T &b) { return x > a ? x < b ? x : b : a; // returns a on NaN } template <class T, class S> static inline T mix(const T &x, const T &y, const S &a) { return (S(1)-a) * x + a * y; } template <class T> static inline T step(const T &a, const T &x) { return x < a ? T(0) : T(1); } template <class T> static inline T smoothstep(const T &a, const T &b, const T &x) { if (b <= a) return step(x,a); T t = (x - a) / (b - a); return t <= T(0) ? T(0) : t >= T(1) ? T(1) : t * t * (T(3) - T(2) * t); } template <size_t D, class T> static inline T faceforward(const Vec<D,T> &N, const Vec<D,T> &I, const Vec<D,T> &Nref) { return ((Nref DOT I) < T(0)) ? N : -N; } template <size_t D, class T> static inline T reflect(const Vec<D,T> &I, const Vec<D,T> &N) { return I - (T(2) * (N DOT I)) * N; } template <size_t D, class T> static inline T refract(const Vec<D,T> &I, const Vec<D,T> &N, const T &eta) { using namespace ::std; T NdotI = N DOT I; T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI)); return (k < T(0)) ? 
T(0) : eta * I - (eta * NdotI * sqrt(k)) * N; } // Squared length template <size_t D, class T> static inline const T len2(const Vec<D,T> &v) { using namespace ::std; T l2 = v[0] * v[0]; for (size_t i = 1; i < D; i++) l2 += v[i] * v[i]; return l2; } // Length template <size_t D, class T> static inline const T len(const Vec<D,T> &v) { using namespace ::std; return sqrt(len2(v)); } // Squared distance template <size_t D, class T> static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2) { using namespace ::std; T d2 = sqr(v2[0]-v1[0]); for (size_t i = 1; i < D; i++) d2 += sqr(v2[i]-v1[i]); return d2; } // Distance template <size_t D, class T> static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2) { using namespace ::std; return sqrt(dist2(v1,v2)); } // In-place normalization to unit length template <size_t D, class T> static inline Vec<D,T> normalize(Vec<D,T> &v) { using namespace ::std; T l = len(v); if (unlikely(l <= T(0))) { v[0] = T(1); for (size_t i = 1; i < D; i++) v[i] = T(0); return v; } l = T(1) / l; for (size_t i = 0; i < D; i++) v[i] *= l; return v; } // In-place normalization to unit length template <size_t D, class T> static inline Vec<D,T> unit(const Vec<D,T> &v1) { using namespace ::std; Vec<D,T> v = v1; T l = len(v); if (unlikely(l <= T(0))) { v[0] = T(1); for (size_t i = 1; i < D; i++) v[i] = T(0); return v; } l = T(1) / l; for (size_t i = 0; i < D; i++) v[i] *= l; return v; } // Area-weighted triangle face normal template <class T> static inline T trinorm(const T &v0, const T &v1, const T &v2) { return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0)); } // Angle between two vectors template <size_t D, class T> static inline const T angle(const Vec<D,T> &v1, const Vec<D,T> &v2) { using namespace ::std; return atan2(len(v1 CROSS v2), v1 DOT v2); } }; // namespace trimesh // POSIX / C99 compatibility functions for MSVS #ifdef _WIN32 #ifdef cbrt # undef cbrt #endif inline float cbrt(float x) { using namespace ::std; return 
(x < 0.0f) ? -pow(-x, 1.0f / 3.0f) : pow(x, 1.0f / 3.0f); } inline double cbrt(double x) { using namespace ::std; return (x < 0.0) ? -pow(-x, 1.0 / 3.0) : pow(x, 1.0 / 3.0); } inline long double cbrt(long double x) { using namespace ::std; return (x < 0.0L) ? -pow(-x, 1.0L / 3.0L) : pow(x, 1.0L / 3.0L); } #ifdef round # undef round #endif inline float round(float x) { return (x < 0.0f) ? float(int(x - 0.5f)) : float(int(x + 0.5f)); } inline double round(double x) { return (x < 0.0f) ? double(int(x - 0.5)) : double(int(x + 0.5)); } inline long double round(long double x) { return (x < 0.0f) ? (long double)(int(x - 0.5L)) : (long double)(int(x + 0.5L)); } #ifdef trunc # undef trunc #endif inline float trunc(float x) { return (x < 0.0f) ? float(int(x)) : float(int(x)); } inline double trunc(double x) { return (x < 0.0f) ? double(int(x)) : double(int(x)); } inline long double trunc(long double x) { return (x < 0.0f) ? (long double)(int(x)) : (long double)(int(x)); } #endif // _WIN32 // Generic macros for declaring 1-, 2-, and 3- argument // componentwise functions on Vecs. 
#define VEC_DECLARE_ONEARG(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i]); \ return result; \ } // Vector-scalar, scalar-vector, and componentwise vector-vector versions #define VEC_DECLARE_TWOARG_VS(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], a); \ return result; \ } #define VEC_DECLARE_TWOARG_SV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const T &a, const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(a, v[i]); \ return result; \ } #define VEC_DECLARE_TWOARG_VV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], w[i]); \ return result; \ } #define VEC_DECLARE_THREEARG_VSS(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a, const T &b) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], a, b); \ return result; \ } #define VEC_DECLARE_THREEARG_SSV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const T &a, const T &b, const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ 
Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(a, b, v[i]); \ return result; \ } #define VEC_DECLARE_THREEARG_VVV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w, const trimesh::Vec<D,T> &x) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], w[i], x[i]); \ return result; \ } // The following is the list of functions in C89 and C++98, with the exception // of frexp, ldexp, and modf (which have irregular calling conventions). // They are supposed to be in namespace std, but Visual Studio and some // older compilers also declare them in the global namespace. // In the name of compatibility, we (reluctantly) do likewise. VEC_DECLARE_ONEARG(acos) VEC_DECLARE_ONEARG(asin) VEC_DECLARE_ONEARG(atan) VEC_DECLARE_TWOARG_VV(atan2) VEC_DECLARE_ONEARG(ceil) VEC_DECLARE_ONEARG(cos) VEC_DECLARE_ONEARG(cosh) VEC_DECLARE_ONEARG(exp) VEC_DECLARE_ONEARG(fabs) VEC_DECLARE_ONEARG(floor) VEC_DECLARE_TWOARG_VS(fmod) VEC_DECLARE_TWOARG_VV(fmod) VEC_DECLARE_ONEARG(log) VEC_DECLARE_ONEARG(log10) VEC_DECLARE_TWOARG_VS(pow) VEC_DECLARE_TWOARG_SV(pow) VEC_DECLARE_TWOARG_VV(pow) VEC_DECLARE_ONEARG(sin) VEC_DECLARE_ONEARG(sinh) VEC_DECLARE_ONEARG(sqrt) VEC_DECLARE_ONEARG(tan) VEC_DECLARE_ONEARG(tanh) namespace std { using ::acos; using ::asin; using ::atan; using ::atan2; using ::ceil; using ::cos; using ::cosh; using ::exp; using ::fabs; using ::floor; using ::fmod; using ::log; using ::log10; using ::pow; using ::sin; using ::sinh; using ::sqrt; using ::tan; using ::tanh; }; // These are only in namespace std. namespace std { VEC_DECLARE_TWOARG_VS(min) VEC_DECLARE_TWOARG_SV(min) VEC_DECLARE_TWOARG_VV(min) VEC_DECLARE_TWOARG_VS(max) VEC_DECLARE_TWOARG_SV(max) VEC_DECLARE_TWOARG_VV(max) // Swap two Vecs. Not atomic, unlike class method. 
template <size_t D, class T> static inline void swap(const ::trimesh::Vec<D,T> &v1, const ::trimesh::Vec<D,T> &v2) { for (size_t i = 0; i < D; i++) swap(v1[i], v2[i]); } }; // These are POSIX and are commonly used. Global namespace. // Compatibility versions of these for MSVC are above. VEC_DECLARE_ONEARG(cbrt) VEC_DECLARE_ONEARG(round) VEC_DECLARE_ONEARG(trunc) // These are new functions declared in namespace trimesh. namespace trimesh { VEC_DECLARE_ONEARG(sqr) VEC_DECLARE_ONEARG(cube) VEC_DECLARE_ONEARG(sgn) VEC_DECLARE_ONEARG(fract) VEC_DECLARE_THREEARG_VSS(clamp) VEC_DECLARE_THREEARG_VVV(clamp) VEC_DECLARE_TWOARG_SV(step) VEC_DECLARE_TWOARG_VV(step) VEC_DECLARE_THREEARG_SSV(smoothstep) VEC_DECLARE_THREEARG_VVV(smoothstep) }; #undef VEC_DECLARE_ONEARG #undef VEC_DECLARE_TWOARG_VS #undef VEC_DECLARE_TWOARG_SV #undef VEC_DECLARE_TWOARG_VV #undef VEC_DECLARE_THREEARG_VSS #undef VEC_DECLARE_THREEARG_SSV #undef VEC_DECLARE_THREEARG_VVV // Both valarrays and GLSL use abs() on a vector to mean fabs(). // Let's do the same... template < ::std::size_t D, class T > static inline trimesh::Vec<D,T> abs(const trimesh::Vec<D,T> &v) { return fabs(v); } namespace std { using ::abs; }; #endif
GraphCutBase.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#pragma once

#ifndef BKTOOLS_GRAPHCUTBASE_H
#define BKTOOLS_GRAPHCUTBASE_H

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <iostream>
#include <limits>
#include <numeric>
#include <utility>
#include <tuple>
#include <type_traits>
#include <vector>

#include <bkTools/graphcut/GraphCutBase_MemberAccess.h>
#include <bkTools/graphcut/gc_definitions.h>

#ifdef BK_EMIT_PROGRESS

#include <bkTools/progress/Progress.h>
#include <bkTools/progress/GlobalProgressManager.h>
#include <bkTools/localization/GlobalLocalizationManager.h>

#endif

namespace bk::gc_details
{
  /// CRTP base for a TDims-dimensional grid graph cut.
  ///
  /// Holds the per-node grid buffers (edge capacities, residuals, flags,
  /// timestamps, distances, narrow band) plus the user-supplied source/sink
  /// seed lists. The derived solver is reached via deriv() (CRTP); per-node
  /// element access (flag(), residual(), timestamp(), ...) is provided by the
  /// GraphCutBase_MemberAccess base class.
  template<int TDims, typename TDerived> class GraphCutBase : protected GraphCutBase_MemberAccess<TDims, GraphCutBase<TDims, TDerived>>
  {
      //====================================================================================================
      //===== ASSERTIONS
      //====================================================================================================
      static_assert(TDims >= 2, "invalid num dimensions");

      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = GraphCutBase<TDims, TDerived>;
      using derived_type = TDerived;
      using base_type = GraphCutBase_MemberAccess<TDims, GraphCutBase<TDims, TDerived>>;

    protected:
      using gc = gcdef<TDims>;
      // one double per dimension (capacity towards the successor neighbor)
      using ptNd_type = std::array<double, TDims>;
      // two doubles per dimension (residuals towards predecessor and successor)
      using pt2Nd_type = std::array<double, 2 * TDims>;

      // TDims-fold nested std::vector acting as an N-d grid
      template<typename T> using vector_grid_type = wrap_vector_t<T, TDims>;

    public:
      using flag_type = typename gc::flag_type;
      using id_type = typename gc::id_type;

      friend base_type;

      /// @{ -------------------------------------------------- GET NUM DIMENSIONS
      [[nodiscard]] static constexpr int NumDimensions() noexcept
      { return TDims; }
      /// @}

      //====================================================================================================
      //===== MEMBERS
      //====================================================================================================
    protected:
      id_type _size;                                      // grid extent per dimension
      vector_grid_type<ptNd_type> _edge_capacity;         // initial capacities (kept for reset())
      vector_grid_type<pt2Nd_type> _residual;             // working residual capacities
      vector_grid_type<int> _distance_to_terminal;
      vector_grid_type<int> _timestamp;
      vector_grid_type<flag_type> _flags;                 // set membership / parent-direction bit flags
      std::vector<id_type> _connected_to_source;          // user-provided source seeds
      std::vector<id_type> _connected_to_sink;            // user-provided sink seeds
      bool _up2date;                                      // false whenever seeds/capacities changed
      vector_grid_type<bool> _band;                       // true = outside narrow band (candidate sink)

      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      GraphCutBase() : _up2date(false)
      { _size.fill(0); }

      GraphCutBase(const self_type&) = default;
      GraphCutBase(self_type&&) noexcept = default;
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~GraphCutBase() = default;
      /// @}

      //====================================================================================================
      //===== GETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- HELPERS: TO DERIVED
    private:
      // CRTP downcasts to the concrete solver type
      [[nodiscard]] constexpr derived_type* deriv() noexcept
      { return static_cast<derived_type*>(this); }

      [[nodiscard]] constexpr const derived_type* deriv() const noexcept
      { return static_cast<const derived_type*>(this); }

    protected:
      /// @}

      /// @{ -------------------------------------------------- GET EDGE CAPACITY
      [[nodiscard]] ptNd_type& edge_capacity(const id_type& n)
      { return this->get_from_vector_grid(_edge_capacity, n); }

      [[nodiscard]] const ptNd_type& edge_capacity(const id_type& n) const
      { return this->get_from_vector_grid(_edge_capacity, n); }
      /// @}

      /// @{ -------------------------------------------------- CONNECTED TO SOURCE
      // NOTE(review): these index a flat std::vector<id_type> through the
      // grid accessor get_from_vector_grid — verify this overload is intended
      // for flat vectors; the seed lists are not grid-shaped.
      [[nodiscard]] id_type& connected_to_source(const id_type& n)
      { return this->get_from_vector_grid(_connected_to_source, n); }

      [[nodiscard]] const id_type& connected_to_source(const id_type& n) const
      { return this->get_from_vector_grid(_connected_to_source, n); }
      /// @}

      /// @{ -------------------------------------------------- CONNECTED TO SINK
      [[nodiscard]] id_type& connected_to_sink(const id_type& n)
      { return this->get_from_vector_grid(_connected_to_sink, n); }

      [[nodiscard]] const id_type& connected_to_sink(const id_type& n) const
      { return this->get_from_vector_grid(_connected_to_sink, n); }
      /// @}

    protected:
      /// @{ -------------------------------------------------- SETS
      // Set membership queries via bit flags; a node is in exactly one of
      // source/sink/free set (maintained by set_source_set/set_sink_set).
      [[nodiscard]] bool is_in_source_set(const id_type& n) const
      { return this->flag(n) & gc::FLAG_SOURCE_SET(); }

      [[nodiscard]] bool is_in_sink_set(const id_type& n) const
      { return this->flag(n) & gc::FLAG_SINK_SET(); }

      [[nodiscard]] bool is_in_free_set(const id_type& n) const
      { return this->flag(n) & gc::FLAG_FREE_SET(); }

      // true iff p and q are both in the source set or both in the sink set
      // (two free nodes are NOT "in the same set" here)
      [[nodiscard]] bool are_in_same_set(const id_type& p, const id_type& q) const
      {
          const flag_type flag_p = this->flag(p);
          const flag_type flag_q = this->flag(q);

          return (flag_p & gc::FLAG_SOURCE_SET() && flag_q & gc::FLAG_SOURCE_SET()) || (flag_p & gc::FLAG_SINK_SET() && flag_q & gc::FLAG_SINK_SET());
      }
      /// @}

    public:
      /// @{ -------------------------------------------------- GET NUM CONNECTED SINK/SOURCE NODES
      [[nodiscard]] unsigned int num_nodes_connected_to_source() const
      { return _connected_to_source.size(); }

      [[nodiscard]] unsigned int num_nodes_connected_to_sink() const
      { return _connected_to_sink.size(); }
      /// @}

      /// @{ -------------------------------------------------- GET SINK/SOURCE NODE LIST
      [[nodiscard]] const std::vector<id_type>& nodes_connected_to_source() const
      { return _connected_to_source; }

      [[nodiscard]] const std::vector<id_type>& nodes_connected_to_sink() const
      { return _connected_to_sink; }
      /// @}

    protected:
      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] self_type& operator=(const self_type&) = default;
      [[maybe_unused]] self_type& operator=(self_type&&) noexcept = default;
      /// @}

      /// @{ -------------------------------------------------- SET PARENT
      // Mark the terminal as the node's parent; also moves the node into the
      // corresponding set.
      void set_source_as_parent(const id_type& child)
      {
          flag_type& f = this->flag(child);
          f &= ~gc::PARENT_IS_SINK();
          f |= gc::PARENT_IS_SOURCE();
          set_source_set(child);
      }

      void set_sink_as_parent(const id_type& child)
      {
          flag_type& f = this->flag(child);
          f &= ~gc::PARENT_IS_SOURCE();
          f |= gc::PARENT_IS_SINK();
          set_sink_set(child);
      }
      /// @}

      /// @{ -------------------------------------------------- SET SET
      // Exclusive set assignment: clears the other two set flags first.
      void set_source_set(const id_type& node)
      {
          flag_type& f = this->flag(node);
          f &= ~gc::FLAG_SINK_SET();
          f &= ~gc::FLAG_FREE_SET();
          f |= gc::FLAG_SOURCE_SET();
      }

      void set_sink_set(const id_type& node)
      {
          flag_type& f = this->flag(node);
          f &= ~gc::FLAG_SOURCE_SET();
          f &= ~gc::FLAG_FREE_SET();
          f |= gc::FLAG_SINK_SET();
      }
      /// @}

      /// @{ -------------------------------------------------- EDGE CAPACITY
      // Writes the residual of the directed edge `dir` at `node` and keeps the
      // "edge is saturated" flag bit in sync (cap <= 0 means full).
      void set_edge_capacity(const id_type& node, unsigned char dir, double cap)
      {
          this->residual(node)[dir] = cap;

          if (cap <= 0)
          { this->flag(node) |= (gc::FLAG_EDGE_PREDECESSOR_IS_FULL(0) << dir); }
          else
          { this->flag(node) &= ~(gc::FLAG_EDGE_PREDECESSOR_IS_FULL(0) << dir); }
      }

      void set_edge_capacity(const id_type& p, const id_type& q, double cap)
      { set_edge_capacity(p, gc::DIFF_TO_EDGE_ID(gc::PAIRWISE_DIFFERENCE(q, p)), cap); }

      // Sets the edge at p and the matching opposite-direction edge at the
      // neighbor (bounds-checked), so both half-edges agree.
      void set_edge_capacity_mutual(const id_type& p, unsigned char dir, double cap)
      {
          set_edge_capacity(p, dir, cap);

          for (int dimId = 0; dimId < TDims; ++dimId)
          {
              if (p[dimId] > 0 && dir == gc::ID_EDGE_PREDECESSOR(dimId))
              {
                  set_edge_capacity(gc::NEIGHBOR_PREDECESSOR(p, dimId), gc::ID_EDGE_SUCCESSOR(dimId), cap);
                  break;
              }
              else if (p[dimId] < _size[dimId] - 1 && dir == gc::ID_EDGE_SUCCESSOR(dimId))
              {
                  set_edge_capacity(gc::NEIGHBOR_SUCCESSOR(p, dimId), gc::ID_EDGE_PREDECESSOR(dimId), cap);
                  break;
              }
          }
      }

      void set_edge_capacity_mutual(const id_type& p, const id_type& q, double cap)
      { set_edge_capacity_mutual(p, gc::DIFF_TO_EDGE_ID(gc::PAIRWISE_DIFFERENCE(q, p)), cap); }
      /// @}

      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- HELPER: RESET
    private:
      // Recursive per-dimension sweep (compile-time recursion over dimension I).
      // At the innermost level: restores residuals from _edge_capacity and
      // clears timestamp/flags/distance/band for the node p.
      template<int I> void _reset(id_type& p)
      {
          for (int x = 0; x < _size[I]; ++x)
          {
              p[I] = x;

              if constexpr (I != TDims - 1)
              { _reset<I + 1>(p); }
              else
              {
                  for (int dimId = 0; dimId < TDims; ++dimId)
                  {
                      if (p[dimId] < _size[dimId] - 1)
                      {
                          const double w = edge_capacity(p)[dimId];
                          set_edge_capacity_mutual(p, gc::ID_EDGE_SUCCESSOR(dimId), w);
                      }
                  }

                  this->timestamp(p) = 0;
                  this->flag(p) = gc::FLAG_FREE_SET();
                  this->distance_to_terminal(p) = gc::invalid_distance;
                  this->band(p) = true;
              }
          } // for x
      }

    public:
      /// @}

      /// @{ -------------------------------------------------- RESET
      // Re-initializes all per-node state from the stored capacities;
      // parallelized over the first dimension. Also notifies the derived
      // solver via reset_impl().
      void reset()
      {
          #pragma omp parallel for
          for (int x = 0; x < _size[0]; ++x)
          {
              id_type p;
              p.fill(0);
              p[0] = x;
              _reset<1>(p);
          }

          _up2date = false;

          deriv()->reset_impl();
      }
      /// @}

      /// @{ -------------------------------------------------- HELPER: INIT
    private:
      // (Re)allocates all grid buffers for the given image size with neutral
      // defaults; does not touch the seed lists.
      void _init(const std::array<unsigned int, TDims>& img_size)
      {
          for (int dimId = 0; dimId < TDims; ++dimId)
          { _size[dimId] = static_cast<int>(img_size[dimId]); }

          pt2Nd_type default_residual;
          default_residual.fill(0.0);

          ptNd_type default_edge_capacity;
          default_edge_capacity.fill(0.0);

          resize_wrapped_vector<TDims>(_edge_capacity, _size, default_edge_capacity);
          resize_wrapped_vector<TDims>(_residual, _size, default_residual);
          resize_wrapped_vector<TDims>(_distance_to_terminal, _size, gc::invalid_distance);
          resize_wrapped_vector<TDims>(_timestamp, _size, 0);
          resize_wrapped_vector<TDims>(_flags, _size, gc::FLAG_FREE_SET());
          resize_wrapped_vector<TDims>(_band, _size, true);

          _up2date = false;
      }
      /// @}

      /// @{ -------------------------------------------------- HELPER: INIT FROM INTENSITY IMAGE
    private:
      // Per-node capacity setup from image intensities: for each forward
      // neighbor, the capacity is weight_function(scaled intensity difference
      // / voxel scale). fn_scale maps raw pixel values to [0,255].
      template<int I, typename TGrayImage, typename TFnPixelAt, typename TFnScale>
      void _init_from_intensity_image(const TGrayImage& img, const std::array<double, TDims>& img_scale, TFnPixelAt function_pixel_at, id_type& id, TFnScale fn_scale, double weight_function_tolerance)
      {
          for (int x = 0; x < _size[I]; ++x)
          {
              id[I] = x;

              if constexpr (I != TDims - 1)
              { _init_from_intensity_image<I + 1>(img, img_scale, function_pixel_at, id, fn_scale, weight_function_tolerance); }
              else
              {
                  const double x0 = fn_scale(function_pixel_at(img, id));
                  ptNd_type& edgecap = this->edge_capacity(id);

                  for (int dimId = 0; dimId < TDims; ++dimId)
                  {
                      if (id[dimId] < _size[dimId] - 1)
                      {
                          const double x1 = function_pixel_at(img, gc::NEIGHBOR_SUCCESSOR(id, dimId));
                          const double diff = (fn_scale(x1) - x0) / img_scale[dimId];
                          const double w = gc::weight_function(diff, weight_function_tolerance);
                          edgecap[dimId] = w;
                      }
                  } // for dimId
              }
          } // for x
      }

    public:
      /// @}

      /// @{ -------------------------------------------------- INIT FROM INTENSITY IMAGE
      // Builds all edge capacities from a grayscale image. minmaxPixelValue
      // provides the intensity range used to normalize values to [0,255].
      // NOTE(review): if minmax[0] == minmax[1], fn_scale divides by zero —
      // callers presumably guarantee a non-degenerate range; verify.
      template<typename TGrayImage, typename TFnPixelAt>
      void init_from_intensity_image(const TGrayImage& img, const std::array<unsigned int, TDims>& img_size, const std::array<double, TDims>& img_scale, const std::array<double, 2>& minmaxPixelValue, TFnPixelAt function_pixel_at, double weight_function_tolerance = 0.5)
      {
          #ifdef BK_EMIT_PROGRESS
          bk::Progress& prog = bk_progress.emplace_task(_size[0] + 1, ___("Initializing graph cut from intensity image"));
          #endif

          _init(img_size);

          #ifdef BK_EMIT_PROGRESS
          prog.increment(1);
          #endif

          auto fn_scale = [&](double x) -> double
          { return 255.0 * (x - static_cast<double>(minmaxPixelValue[0])) / (static_cast<double>(minmaxPixelValue[1]) - static_cast<double>(minmaxPixelValue[0])); };

          #pragma omp parallel for
          for (int x = 0; x < _size[0]; ++x)
          {
              id_type id;
              id[0] = x;
              _init_from_intensity_image<1>(img, img_scale, function_pixel_at, id, fn_scale, weight_function_tolerance);

              #ifdef BK_EMIT_PROGRESS
              #pragma omp critical(gc_init_from_intensity_image)
              { prog.increment(1); }
              #endif
          }

          #ifdef BK_EMIT_PROGRESS
          prog.set_finished();
          #endif
      }
      /// @}

      /// @{ -------------------------------------------------- HELPER: INIT FROM WEIGHT IMAGE
    private:
      // Capacity setup from a user-supplied weight callback; the callback is
      // queried once per (node, dimension) forward edge.
      template<int I, typename TWeightImage, typename TFnPixelAt>
      void _init_from_weight_image(const TWeightImage& img, TFnPixelAt function_weight_at, id_type& id)
      {
          for (int x = 0; x < _size[I]; ++x)
          {
              id[I] = x;

              if constexpr (I != TDims - 1)
              { _init_from_weight_image<I + 1>(img, function_weight_at, id); }
              else
              {
                  ptNd_type& edgecap = this->edge_capacity(id);

                  for (int dimId = 0; dimId < TDims; ++dimId)
                  {
                      if (id[dimId] < _size[dimId] - 1)
                      { edgecap[dimId] = function_weight_at(img, id, dimId); }
                  } // for dimId
              }
          } // for x
      }

    public:
      /// @}

      /// @{ -------------------------------------------------- INIT FROM WEIGHT IMAGE
      template<typename TWeightImage, typename TFnPixelAt>
      void init_from_weight_image(const TWeightImage& img, const std::array<unsigned int, TDims>& img_size, TFnPixelAt function_weight_at)
      {
          #ifdef BK_EMIT_PROGRESS
          bk::Progress& prog = bk_progress.emplace_task(_size[0] + 1, ___("Initializing graph cut from weight image"));
          #endif

          _init(img_size);

          #ifdef BK_EMIT_PROGRESS
          prog.increment(1);
          #endif

          #pragma omp parallel for
          for (int x = 0; x < _size[0]; ++x)
          {
              id_type id;
              id[0] = x;
              _init_from_weight_image<1>(img, function_weight_at, id);

              #ifdef BK_EMIT_PROGRESS
              #pragma omp critical(gc_init_from_weight_image)
              { prog.increment(1); }
              #endif
          }

          #ifdef BK_EMIT_PROGRESS
          prog.set_finished();
          #endif
      }
      /// @}

      /// @{ -------------------------------------------------- HELPERS: AUTOMATIC OUTSIDE IDS WITHIN NARROW BAND
    private:
      // Clears _band (marks "inside band") in an axis-aligned box of radius
      // band_radius around source_node, clamped to the grid.
      template<int I> void _create_narrow_band(const id_type& source_node, const id_type& band_radius, id_type& p)
      {
          if constexpr (I != TDims - 1)
          {
              for (int i = std::max(0, source_node[I] - band_radius[I]); i < std::min(_size[I], source_node[I] + band_radius[I]); ++i)
              {
                  p[I] = i;
                  _create_narrow_band<I + 1>(source_node, band_radius, p);
              }
          }
          else
          {
              for (int i = std::max(0, source_node[I] - band_radius[I]); i < std::min(_size[I], source_node[I] + band_radius[I]); ++i)
              {
                  p[I] = i;

                  #pragma omp critical (gc_narrow_band)
                  { this->band(p) = false; }
              }
          }
      }

      // Every node still flagged true in _band (i.e. outside all bands)
      // becomes a sink seed.
      template<int I> void _sink_from_narrow_band(id_type& p)
      {
          if constexpr (I != TDims - 1)
          {
              for (int i = 0; i < _size[I]; ++i)
              {
                  p[I] = i;
                  _sink_from_narrow_band<I + 1>(p);
              }
          }
          else
          {
              for (int i = 0; i < _size[I]; ++i)
              {
                  p[I] = i;

                  const bool make_sink = this->band(p);

                  if (make_sink)
                  {
                      #pragma omp critical (gc_narrow_band)
                      { add_sink_node(p); }
                  }
              }
          }
      }

    public:
      /// @}

      /// @{ -------------------------------------------------- HELPER: RESET NARROW BAND
    private:
      template<int I> void _reset_narrow_band(id_type& p)
      {
          for (int x = 0; x < _size[I]; ++x)
          {
              p[I] = x;

              if constexpr (I != TDims - 1)
              { _reset_narrow_band<I + 1>(p); }
              else
              { this->band(p) = true; }
          } // for x
      }

      void _reset_narrow_band()
      {
          #pragma omp parallel for
          for (int x = 0; x < _size[0]; ++x)
          {
              id_type p;
              p.fill(0);
              p[0] = x;
              _reset_narrow_band<1>(p);
          }
      }

    public:
      /// @}

      /// @{ -------------------------------------------------- AUTOMATIC OUTSIDE IDS WITHIN NARROW BAND
      // Derives sink seeds automatically: every node farther than the band
      // radius (max of min_width_pixel and band_width_percent * grid size,
      // per dimension) from every source seed becomes a sink node.
      void narrow_band_sink_ids(double band_width_percent, unsigned int min_width_pixel)
      {
          id_type band_radius;

          for (int i = 0; i < TDims; ++i)
          { band_radius[i] = std::max(static_cast<int>(min_width_pixel), static_cast<int>(std::round(static_cast<double>(_size[i]) * band_width_percent))); }

          #ifdef BK_EMIT_PROGRESS
          bk::Progress& prog = bk_progress.emplace_task(_connected_to_source.size() + 1 + _size[0], ___("Creating graph cut narrow band"));
          #endif

          _reset_narrow_band();

          #ifdef BK_EMIT_PROGRESS
          prog.increment(1);
          #endif

          // NOTE(review): unsigned loop variable in an OpenMP for loop is not
          // accepted by all compilers (MSVC OpenMP 2.0 requires signed);
          // verify target toolchains.
          #pragma omp parallel for
          for (unsigned int i = 0; i < _connected_to_source.size(); ++i)
          {
              id_type p;
              _create_narrow_band<0>(_connected_to_source[i], band_radius, p);

              #ifdef BK_EMIT_PROGRESS
              #pragma omp critical
              { prog.increment(1); }
              #endif
          }

          #pragma omp parallel for
          for (int i = 0; i < _size[0]; ++i)
          {
              id_type p;
              p[0] = i;
              _sink_from_narrow_band<1>(p);

              #ifdef BK_EMIT_PROGRESS
              #pragma omp critical
              { prog.increment(1); }
              #endif
          }

          #ifdef BK_EMIT_PROGRESS
          prog.set_finished();
          #endif
      }
      /// @}

      /// @{ -------------------------------------------------- ADD SOURCE/SINK NODE
      // Variadic overloads take one coordinate per dimension; both overloads
      // invalidate the current solution (_up2date = false).
      template<typename... TIds, std::enable_if_t<(sizeof...(TIds) > 1)>* = nullptr>
      void add_source_node(TIds... ids)
      {
          static_assert(sizeof...(TIds) == TDims, "invalid number of arguments");
          static_assert(std::conjunction_v<std::is_arithmetic<TIds>...>, "arithmetic type arguments required");

          _connected_to_source.emplace_back(id_type{static_cast<int>(ids)...});
          _up2date = false;
      }

      void add_source_node(const id_type& p)
      {
          _connected_to_source.emplace_back(p);
          _up2date = false;
      }

      template<typename... TIds, std::enable_if_t<(sizeof...(TIds) > 1)>* = nullptr>
      void add_sink_node(TIds... ids)
      {
          static_assert(sizeof...(TIds) == TDims, "invalid number of arguments");
          static_assert(std::conjunction_v<std::is_arithmetic<TIds>...>, "arithmetic type arguments required");

          _connected_to_sink.emplace_back(id_type{static_cast<int>(ids)...});
          _up2date = false;
      }

      void add_sink_node(const id_type& p)
      {
          _connected_to_sink.emplace_back(p);
          _up2date = false;
      }
      /// @}

      /// @{ -------------------------------------------------- CLEAR SOURCE/SINK NODES
      void clear_source_nodes()
      {
          _connected_to_source.clear();
          _up2date = false;
      }

      void clear_sink_nodes()
      {
          _connected_to_sink.clear();
          _up2date = false;
      }
      /// @}

      /// @{ -------------------------------------------------- IS IN SEGMENTATION
      // A node is part of the segmentation iff it ended up in the source set.
      template<typename... TIds, std::enable_if_t<(sizeof...(TIds) > 1)>* = nullptr>
      [[nodiscard]] bool is_in_segmentation(TIds... ids)
      {
          static_assert(sizeof...(TIds) == TDims, "invalid number of arguments");
          static_assert(std::conjunction_v<std::is_arithmetic<TIds>...>, "arithmetic type arguments required");

          return is_in_segmentation(id_type{static_cast<int>(ids)...});
      }

      [[nodiscard]] bool is_in_segmentation(const id_type& p) const
      { return this->flag(p) & gc::FLAG_SOURCE_SET(); }
      /// @}
  }; // class GraphCutBase
} // namespace bk

#endif //BKTOOLS_GRAPHCUTBASE_H
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
% */
/*
  Classify(): threshold every pixel against the cluster extents discovered in
  the histogram analysis; any pixel that matches no cluster is assigned by
  fuzzy c-means membership. On success the image has an indexed colormap with
  one entry per surviving cluster.
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,
  const double weighting_exponent,const MagickBooleanType verbose,
  ExceptionInfo *exception)
{
#define SegmentImageTag  "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  double
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register double
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster per (red, green, blue) extent
    triple produced by DefineRegion.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose RGB extents (widened by SafeMargin) contain it; its channel values
    are accumulated into the cluster centers.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): 'count' is reset to 0 here, so the threshold term
    count*cluster_threshold/100.0 scales with the number of clusters kept so
    far rather than the total pixel count accumulated above -- verify this
    against the intended semantics of cluster_threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: finalize mean color, assign sequential id.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares is offset by +255 so it can be
    indexed with signed channel differences in [-255, 255].
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap.
    NOTE(review): on this failure path the cluster list and the (offset)
    squares buffer are not released before returning -- verify whether the
    leak is acceptable here or should be fixed upstream.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do course grain classes: assign each pixel's colormap index by extent
    matching; unmatched pixels fall through to fuzzy c-means below.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *clust;

    register const PixelInfo
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (clust->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (clust->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (clust->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (clust->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (clust->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (clust->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) clust->id,q);
            break;
          }
      }
      if (clust == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: pick the colormap entry j maximizing
            1/sum over the membership ratios against all entries k.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e  C r o s s i n g s                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings, walking from the coarsest scale towards the
    finest. NOTE(review): each iteration reads zero_crossing[i+1]; presumably
    the caller allocates number_crossings+1 entries -- verify.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above stops at k == 1, so k can never be
         negative here; this guard appears to be dead code. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e R e g i o n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima.
*/
  /*
    Find the right side: advance to the next minima (negative extrema entry);
    the region ends on the index just before it.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation: a one-sided
    three-point formula so the boundary bins get a proper derivative estimate.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing over the 254 interior bins.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e D y n a m i c T h r e s h o l d                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
% % o image: the image. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, PixelInfo *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; double threshold; register const Quantum *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetPixelInfo(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 
1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. 
*/ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. 
*/ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. 
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram: zero all 256 bins for each of the three channels.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  /*
    Accumulate one count per pixel per channel; each quantum is scaled down
    to a 0..255 bin index.  A failed row fetch aborts the scan early
    (best-effort: the partial histogram is still returned).
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   I n t e r v a l   T r e e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register double sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(double) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; root->mean_stability=0.0; root->stability=0.0; (void) memset(list,0,TreeLength*sizeof(*list)); for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % double OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. 
% % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(double *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(double) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. 
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  /*
    Gaussian kernel with standard deviation tau:
    alpha = 1/(tau*sqrt(2*pi)), beta = -1/(2*tau*tau).
  */
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  /*
    Tabulate exp(beta*x*x); once a value falls below MagickEpsilon the tail is
    left at 0.0, truncating the kernel support.
  */
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel (O(256*256) direct sum); the
    kernel is symmetric, so gamma[|x-u|] is the weight for bin u.
  */
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  gamma=(double *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e g m e n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose: Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold: This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SegmentImage(Image *image, const ColorspaceType colorspace,const MagickBooleanType verbose, const double cluster_threshold,const double smooth_threshold, ExceptionInfo *exception) { ColorspaceType previous_colorspace; MagickBooleanType status; register ssize_t i; short *extrema[MaxDimension]; ssize_t *histogram[MaxDimension]; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename) } } /* Initialize histogram. */ previous_colorspace=image->colorspace; (void) TransformImageColorspace(image,colorspace,exception); InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]); /* Classify using the fuzzy c-Means technique. */ status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose, exception); (void) TransformImageColorspace(image,previous_colorspace,exception); /* Relinquish resources. 
*/
  /*
    Release the per-channel extrema/histogram work buffers.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of doubles representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
    NOTE: this clamps the caller's second_derivative array in place.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings.  `parity` remembers the sign of the last nonzero
    sample seen (1 = negative, -1 = positive), so a sign flip relative to it
    is recorded at the bin where the new sign appears.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
merge-sort-recursive.c
/* @file merge-sort-recursive.c
 * @author Robin Vermes
 * @author Valentin Laurent
 * @details recursive (parallel) merge with OpenMP tasks
 *
 * Fixes relative to the previous revision:
 *   - header named the wrong file (prefix-sum-parallel.c);
 *   - integer midpoints computed with integer arithmetic instead of
 *     floor()/fmax() on doubles (identical results for the non-negative
 *     indices used here, and no float round-trip);
 *   - allocation results are checked in main().
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

/* Print every element of t, one "index => value" pair per line. */
void display(int * t, int length_t) {
  int i = 0;
  for (; i < length_t; ++i) {
    printf("%d => %d\n", i, t[i]);
  }
}

/* Swap the two integers pointed to by a and b. */
void exchange(int * a, int * b) {
  int temp = *b;
  *b = *a;
  *a = temp;
}

/* Lower-bound binary search: return the smallest index in [first, last+1]
 * such that all elements of array[first..index-1] are < x.  An empty range
 * (last < first) yields `first`. */
int binary_search(int x, int * array, int first, int last) {
  int low = first;
  /* equivalent of the old fmax(first, last + 1) for these integer ranges */
  int high = (first > last + 1) ? first : last + 1;
  while (low < high) {
    int mid = low + (high - low) / 2; /* integer midpoint, overflow-safe */
    if (x <= array[mid]) {
      high = mid;
    } else {
      low = mid + 1;
    }
  }
  return high;
}

/* P-MERGE (CLRS-style): merge the sorted runs u[first_u..last_u] and
 * u[first_v..last_v] into t starting at first_t.  The median of the larger
 * run is placed, then the two remaining halves are merged as OpenMP tasks.
 * Returns t. */
int * merge(int * u, int first_u, int last_u, int first_v, int last_v, int * t, int first_t, int n) {
  int n1 = last_u - first_u + 1;
  int n2 = last_v - first_v + 1;
  /* keep the larger run as run 1 so the split point is well defined */
  if (n1 < n2) {
    exchange(&n1, &n2);
    exchange(&first_u, &first_v);
    exchange(&last_u, &last_v);
  }
  if (n1 != 0) {
    int mid_u = (first_u + last_u) / 2; /* median index of run 1 */
    int mid_v = binary_search(u[mid_u], u, first_v, last_v);
    int mid_t = first_t + (mid_u - first_u) + (mid_v - first_v);
    t[mid_t] = u[mid_u];
    #pragma omp task shared(u, t) firstprivate(mid_u, last_u, mid_v, last_v, mid_t)
    merge(u, first_u, mid_u - 1, first_v, mid_v - 1, t, first_t, n);
    #pragma omp task shared(u, t) firstprivate(mid_u, last_u, mid_v, last_v, mid_t)
    merge(u, mid_u + 1, last_u, mid_v, last_v, t, mid_t + 1, n);
    /* wait for both halves before returning to the caller */
    #pragma omp taskwait
  }
  return t;
}

int main(int argc, char const *argv[]) {
  /* ARGUMENT CHECKING */
  if (argc < 2) {
    printf("USAGE : %s <N>\n", argv[0]);
    exit(1);
  }

  /* INITIALIZATION */
  /* sizes of table */
  int n = atoi(argv[1]);
  printf("----------------------------------\n");
  printf("start merging with n = %d\n", n);

  /* tables creation (checked: was previously dereferenced unchecked) */
  int * tab = (int *) malloc(2 * n * sizeof(int));
  int * tab_sorted = (int *) malloc(2 * n * sizeof(int));
  if (tab == NULL || tab_sorted == NULL) {
    fprintf(stderr, "allocation failure\n");
    free(tab);
    free(tab_sorted);
    return 1;
  }

  /* fill the first half with the even values and the second half with the
   * odd values of [0, 2n); each half is therefore already sorted */
  int i = 0;
  int i_even = 0;
  int i_odd = n;
  for (; i < 2 * n; ++i) {
    if (i % 2 == 0) {
      tab[i_even] = i;
      i_even++;
    } else {
      tab[i_odd] = i;
      i_odd++;
    }
  }

  /* MERGE SORT */
  double start = omp_get_wtime();
  #pragma omp parallel
  {
    #pragma omp single
    {
      tab_sorted = merge(tab, 0, n - 1, n, 2 * n - 1, tab_sorted, 0, n);
      printf("number of threads used : %d\n", omp_get_num_threads());
    }
  }
  printf("user time used for parallel merging : %f\n", omp_get_wtime() - start);

  /* DISPLAY */
  display(tab_sorted, 2 * n);

  /* FREE RESOURCES */
  free(tab_sorted);
  free(tab);
  return 0;
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
denserasternonsimd.h
#pragma once #include "gdx/cell.h" #include "gdx/cpupredicates-private.h" #include "gdx/eigeniterationsupport-private.h" #include "gdx/exception.h" #include "gdx/nodatapredicates-private.h" #include "gdx/rasterchecks.h" #include "gdx/rasteriterator.h" #include "gdx/rastermetadata.h" #include "infra/cast.h" #include "infra/span.h" #include "infra/string.h" #include <Eigen/Core> #include <algorithm> #include <cassert> #include <vector> namespace gdx::nosimd { template <typename T> class DenseRaster { public: using value_type = T; using size_type = std::size_t; using data_type = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using nodata_type = std::optional<value_type>; using pointer = T*; using const_pointer = const T*; using iterator = pointer; using const_iterator = const_pointer; static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN; static constexpr bool with_nodata = true; static constexpr T NaN = std::numeric_limits<T>::quiet_NaN(); static constexpr bool typeHasNaN() { return raster_type_has_nan; } DenseRaster() = default; DenseRaster(int32_t rows, int32_t cols) : _meta(rows, cols) , _data(rows, cols) { } explicit DenseRaster(RasterMetadata meta) : _meta(std::move(meta)) , _data(_meta.rows, _meta.cols) { init_nodata_values(); } DenseRaster(int32_t rows, int32_t cols, T fillValue) : DenseRaster(RasterMetadata(rows, cols), fillValue) { } DenseRaster(const RasterMetadata& meta, T fillValue) : _meta(meta) , _data(meta.rows, meta.cols) { if constexpr (raster_type_has_nan) { // make sure we fill tha raster with NaNs if the fill value is the nodata value if (_meta.nodata.has_value() && fillValue == static_cast<T>(*_meta.nodata)) { fillValue = NaN; } } fill(fillValue); } DenseRaster(int32_t rows, int32_t cols, std::span<const T> data) : DenseRaster(RasterMetadata(rows, cols), data) { } DenseRaster(const RasterMetadata& meta, std::span<const T> data) : _meta(meta) , _data(meta.rows, meta.cols) { 
throw_on_datasize_mismatch(meta.rows, meta.cols, data.size()); std::copy(data.begin(), data.end(), _data.data()); init_nodata_values(); } DenseRaster(const RasterMetadata& meta, data_type&& data) : _meta(meta) , _data(data) { if (inf::truncate<int32_t>(_data.size()) != meta.rows * meta.cols) { throw InvalidArgument("Invalid data size provided"); } init_nodata_values(); } DenseRaster(DenseRaster<T>&&) noexcept = default; DenseRaster(const DenseRaster<T>& other) = delete; DenseRaster& operator=(DenseRaster<T>&&) = default; DenseRaster& operator=(const DenseRaster<T>& other) = delete; void resize_and_fill(int32_t rows, int32_t cols, value_type value) { resize(rows, cols); fill(value); } void resize(int32_t rows, int32_t cols) { _meta.rows = rows; _meta.cols = cols; _data.resize(rows, cols); } void resize(int32_t rows, int32_t cols, std::optional<double> nodata) { _meta.rows = rows; _meta.cols = cols; _meta.nodata = nodata; _data.resize(rows, cols); } void set_metadata(RasterMetadata meta) { if (meta.rows * meta.cols != ssize()) { throw InvalidArgument("Cannot change metadata: invalid size"); } _meta = std::move(meta); } DenseRaster<T> copy() const { DenseRaster<T> dst(_meta); dst._data = _data; return dst; } auto begin() { return Eigen::begin(_data); } auto begin() const { return cbegin(); } auto cbegin() const { return Eigen::cbegin(_data); } auto end() { return Eigen::end(_data); } auto end() const { return cend(); } auto cend() const { return Eigen::cend(_data); } const value_type* data() const noexcept { return _data.data(); } value_type* data() noexcept { return _data.data(); } bool has_nodata() const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return std::any_of(begin(), end(), [](T value) { return std::isnan(value); }); } else { return std::any_of(begin(), end(), [nod = static_cast<T>(*_meta.nodata)](T value) { return value == nod; }); } } return false; } std::optional<T> nodata() const noexcept { return 
inf::optional_cast<T>(_meta.nodata); } std::size_t size() const noexcept { return _data.size(); } std::ptrdiff_t ssize() const noexcept { assert(_data.size() <= std::numeric_limits<std::ptrdiff_t>::max()); return static_cast<std::ptrdiff_t>(_data.size()); } bool empty() const noexcept { return _data.size() == 0; } void collapse_data() { // no collapse needed for non floating point types if constexpr (raster_type_has_nan) { if (_meta.nodata.has_value() && !std::isnan(*_meta.nodata)) { std::transform(begin(), end(), begin(), [nod = inf::truncate<T>(*_meta.nodata)](T value) { return std::isnan(value) ? nod : value; }); } } } const RasterMetadata& metadata() const noexcept { return _meta; } void set_projection(int32_t epsg) { _meta.set_projection_from_epsg(epsg); } void clear_projection() { _meta.projection.clear(); } void set_nodata(double newValue) { if constexpr (!raster_type_has_nan) { if (std::isnan(newValue)) { throw InvalidArgument("Nodata value cannot be NaN for integral rasters"); } } _meta.nodata = newValue; } void replace_nodata(T newValue) { const auto dataSize = _data.size(); for (int i = 0; i < dataSize; ++i) { if (is_nodata(i)) { _data(i) = newValue; } } _meta.nodata.reset(); } void turn_value_into_nodata(T value) { const auto dataSize = _data.size(); for (int i = 0; i < dataSize; ++i) { if (_data(i) == value) { mark_as_nodata(i); } } } // assigns the value to all the elements of the raster, even nodata void fill(value_type value) { std::fill(begin(), end(), value); } // assigns the value to all the elements of the raster, leaving nodata values intact void fill_values(value_type value) { std::fill(value_begin(*this), value_end(*this), value); } // Makes all elements of the raster nodata values void fill_with_nodata() { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { fill(NaN); } else { fill(static_cast<T>(*_meta.nodata)); } } } int32_t rows() const noexcept { return _meta.rows; } int32_t cols() const noexcept { return _meta.cols; } 
void mark_as_data(std::size_t /*index*/) noexcept { } void mark_as_data(Cell /*cell*/) noexcept { } void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept { } void mark_as_nodata(std::size_t index) { assert(_meta.nodata.has_value()); if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { _data(index) = NaN; } else { _data(index) = static_cast<T>(*_meta.nodata); } } } void mark_as_nodata(int32_t row, int32_t col) { assert(_meta.nodata.has_value()); if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { _data(row, col) = NaN; } else { _data(row, col) = static_cast<T>(*_meta.nodata); } } } void mark_as_nodata(Cell cell) { mark_as_nodata(cell.r, cell.c); } std::optional<value_type> optional_value(std::size_t index) const noexcept { if (is_nodata(index)) { return std::optional<value_type>(); } else { return _data(index); } } template <typename VarType> std::optional<VarType> optional_value_as(std::size_t index) const noexcept { if (is_nodata(index)) { return std::optional<VarType>(); } else { return static_cast<VarType>(_data(index)); } } bool is_nodata_value(T value) const noexcept { if constexpr (raster_type_has_nan) { return std::isnan(value); } else { if (_meta.nodata.has_value()) { return value == *_meta.nodata; } else { return false; } } } bool is_nodata(std::size_t index) const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return std::isnan(_data(index)); } else { return _data(index) == static_cast<T>(*_meta.nodata); } } return false; } bool is_nodata(const Cell& cell) const noexcept { return is_nodata(cell.r, cell.c); } bool is_nodata(int32_t r, int32_t c) const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return std::isnan(_data(r, c)); } else { return _data(r, c) == static_cast<T>(*_meta.nodata); } } return false; } bool tolerant_equal_to(const DenseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept { if (_meta 
!= other._meta) { return false; } return tolerant_data_equal_to(other, tolerance); } bool tolerant_data_equal_to(const DenseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept { throw_on_size_mismatch(*this, other); cpu::float_equal_to<T> comp(relTolerance); const auto dataSize = size(); for (std::size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) != other.is_nodata(i)) { return false; } if (!is_nodata(i) && !comp(_data(i), other[i])) { return false; } } return true; } /* Add the value to the cell, if the cell is nodata it will become data with the provided value */ void add_to_cell(Cell c, T value) { if (is_nodata(c)) { (*this)[c] = value; } else { (*this)[c] += value; } } bool operator==(const DenseRaster<T>& other) const noexcept { throw_on_size_mismatch(*this, other); const auto dataSize = size(); for (std::size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) != other.is_nodata(i)) { return false; } if (!is_nodata(i) && (_data(i) != other[i])) { return false; } } return true; } bool operator!=(const DenseRaster<T>& other) const noexcept { return !(*this == other); } DenseRaster<uint8_t> not_equals(const DenseRaster<T>& other) const noexcept { throw_on_size_mismatch(*this, other); return perform_binary_operation<nodata::not_equal_to>(other); } template <typename TValue> DenseRaster<uint8_t> not_equals(TValue value) const { static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type"); return perform_unary_operation<nodata::not_equal_to>(value); } template <typename TOther> auto operator+(const DenseRaster<TOther>& other) const { throw_on_size_mismatch(*this, other); return perform_raster_operation<std::plus>(other); } template <typename TValue> auto operator+(TValue value) const { static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type"); return perform_scalar_operation<nodata::plus_scalar>(value); } DenseRaster<T>& operator+=(T value) { static_assert(std::is_scalar_v<T>, 
"Arithmetic operation called with non scalar type"); nodata::plus_scalar<T> pred(nodata(), value); for (auto& elem : _data) { elem = pred(elem); } return *this; } template <typename TOther> DenseRaster<T>& operator+=(const DenseRaster<TOther>& other) { throw_on_size_mismatch(*this, other); const auto dataSize = size(); for (std::size_t i = 0; i < dataSize; ++i) { bool leftis_nodata = is_nodata(i); if (leftis_nodata != other.is_nodata(i)) { if (leftis_nodata) { mark_as_data(i); _data(i) = static_cast<T>(other[i]); } continue; } if (!leftis_nodata) { _data(i) += static_cast<T>(other[i]); } } return *this; } DenseRaster<T> operator-() const { if constexpr (std::is_unsigned_v<T>) { throw RuntimeError("Minus operator applied to unsigned value"); } else { DenseRaster<T> result(_meta, DenseRaster<T>::data_type(_data)); std::transform(result.begin(), result.end(), result.begin(), nodata::negate<T>(_meta.nodata)); return result; } } template <typename TOther> auto operator-(const DenseRaster<TOther>& other) const { throw_on_size_mismatch(*this, other); return perform_raster_operation<std::minus>(other); } template <typename TValue> auto operator-(TValue value) const { static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type"); return perform_scalar_operation<nodata::minus_scalar>(value); } DenseRaster<T>& operator-=(T value) { static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type"); nodata::minus_scalar<T> pred(nodata(), value); for (auto& elem : _data) { elem = pred(elem); } return *this; } template <typename TOther> auto operator*(const DenseRaster<TOther>& other) const { throw_on_size_mismatch(*this, other); return perform_raster_operation<std::multiplies>(other); } template <typename TValue> auto operator*(TValue value) const { static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type"); return perform_scalar_operation<nodata::multiplies_scalar>(value); } DenseRaster<T>& 
operator*=(T value) { static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type"); nodata::multiplies_scalar<T> pred(nodata(), value); for (auto& elem : _data) { elem = pred(elem); } return *this; } template <typename TOther> DenseRaster<T>& operator*=(const DenseRaster<TOther>& other) { throw_on_size_mismatch(*this, other); if constexpr (raster_type_has_nan) { _data *= other._data; } else { const auto dataSize = size(); for (size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) || other.is_nodata(i)) { mark_as_nodata(i); } else { _data(i) *= static_cast<T>(other[i]); } } } return *this; } template <typename TOther> auto operator/(const DenseRaster<TOther>& other) const { throw_on_size_mismatch(*this, other); using TResult = decltype(0.f * TOther()); // use float or double as result type DenseRaster<TResult> result(_meta); if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) { result.set_nodata(*other.metadata().nodata); } if (!result.nodata().has_value()) { result.set_nodata(std::numeric_limits<TResult>::quiet_NaN()); } TResult nodata = result.nodata().value(); if constexpr (std::numeric_limits<TResult>::has_quiet_NaN) { nodata = std::numeric_limits<TResult>::quiet_NaN(); } #pragma omp parallel for for (size_t i = 0; i < size(); ++i) { auto v = other[i]; if (v == 0) { result[i] = nodata; } else { if (is_nodata(i) || other.is_nodata(i)) { result[i] = nodata; } else { result[i] = static_cast<TResult>(_data(i)) / other[i]; } } } return result; } template <typename TValue> auto operator/(TValue value) const { static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type"); if (value == 0) { throw InvalidArgument("Division by zero"); } return perform_scalar_operation<nodata::divides_scalar>(value); } DenseRaster<T>& operator/=(T value) { static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type"); nodata::divides_scalar<T> pred(nodata(), value); for (auto& elem : _data) { 
elem = pred(elem); } return *this; } template <typename TOther> DenseRaster<T>& operator/=(const DenseRaster<TOther>& other) { throw_on_size_mismatch(*this, other); const auto dataSize = size(); for (size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) != other.is_nodata(i)) { continue; } _data(i) /= static_cast<T>(other[i]); } return *this; } value_type& operator[](std::size_t index) { return _data(index); } value_type operator[](std::size_t index) const { return _data(index); } value_type& operator[](const Cell& cell) { return _data(cell.r, cell.c); } const value_type& operator[](const Cell& cell) const { return _data(cell.r, cell.c); } value_type& operator()(int32_t row, int32_t col) { return _data(row, col); } const value_type& operator()(int32_t row, int32_t col) const { return _data(row, col); } DenseRaster<uint8_t> operator!() const { return perform_unary_operation<nodata::logical_not>(); } template <typename TOther> DenseRaster<uint8_t> operator&&(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::logical_and>(other); } template <typename TOther> DenseRaster<uint8_t> operator||(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::logical_or>(other); } template <typename TOther> DenseRaster<uint8_t> operator>(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::greater>(other); } DenseRaster<uint8_t> operator>(T threshold) const { return perform_unary_operation<nodata::greater>(threshold); } template <typename TOther> DenseRaster<uint8_t> operator>=(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::greater_equal>(other); } DenseRaster<uint8_t> operator>=(T threshold) const { return perform_unary_operation<nodata::greater_equal>(threshold); } template <typename TOther> DenseRaster<uint8_t> operator<(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::less>(other); } DenseRaster<uint8_t> operator<(T threshold) 
const { return perform_unary_operation<nodata::less>(threshold); } template <typename TOther> DenseRaster<uint8_t> operator<=(const DenseRaster<TOther>& other) const { return perform_binary_operation<nodata::less_equal>(other); } DenseRaster<uint8_t> operator<=(T threshold) const { return perform_unary_operation<nodata::less_equal>(threshold); } void replace(T oldValue, T newValue) noexcept { std::replace(begin(), end(), oldValue, newValue); } std::string to_string() const { if constexpr (std::is_same_v<uint8_t, T>) { DenseRaster<uint16_t> copy(_meta); std::copy(begin(), end(), copy.begin()); return copy.to_string(); } else { std::stringstream ss; for (int i = 0; i < rows(); ++i) { std::span<const T> row(&_data[i * cols()], cols()); ss << inf::str::join(row, ", ") << "\n"; } return ss.str(); } } void init_nodata_values() { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { std::replace(begin(), end(), static_cast<value_type>(*_meta.nodata), std::numeric_limits<value_type>::quiet_NaN()); } } } private: static void throw_on_datasize_mismatch(int32_t rows, int32_t cols, size_t dataSize) { if (static_cast<size_t>(rows * cols) != dataSize) { throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols); } } // Performs a unary operation on all the elements that results in true or false template <template <typename> typename BinaryPredicate, typename TOther> DenseRaster<uint8_t> perform_unary_operation(TOther value) const { DenseRaster<uint8_t> result(_meta); if (_meta.nodata.has_value()) { result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max())); } auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>()); const auto size = result.size(); #pragma omp parallel for for (std::size_t i = 0; i < size; ++i) { result[i] = pred(_data(i), static_cast<T>(value)); } return result; } template <template <typename> typename UnaryPredicate> DenseRaster<uint8_t> perform_unary_operation() 
const { DenseRaster<uint8_t> result(_meta); if (_meta.nodata) { result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max())); } std::transform(cbegin(), cend(), result.begin(), UnaryPredicate<T>(_meta.nodata)); return result; } template <template <typename> typename BinaryPredicate, typename TOther> DenseRaster<uint8_t> perform_binary_operation(const DenseRaster<TOther>& other) const { throw_on_size_mismatch(*this, other); using WidestType = decltype(T() * TOther()); DenseRaster<uint8_t> result(_meta); if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) { result.set_nodata(std::numeric_limits<uint8_t>::max()); } auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata); const auto size = result.size(); #pragma omp parallel for for (std::size_t i = 0; i < size; ++i) { result[i] = pred(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i])); } return result; } template <template <typename> typename UnaryPredicate, typename TScalar> auto perform_scalar_operation(TScalar scalar) const { using WidestType = decltype(T() * TScalar()); auto pred = UnaryPredicate<WidestType>(_meta.nodata, static_cast<WidestType>(scalar)); DenseRaster<WidestType> result(_meta); std::transform(cbegin(), cend(), result.begin(), [this, pred](T value) { if (is_nodata_value(value)) { return value; } return pred(value); }); return result; } template <template <typename> typename BinaryPredicate, typename TOther> auto perform_raster_operation(const DenseRaster<TOther>& other) const { using WidestType = decltype(T() * TOther()); DenseRaster<WidestType> result(_meta); if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) { result.set_nodata(*other.metadata().nodata); } auto operation = BinaryPredicate<WidestType>(); auto nodata = result.nodata().value_or(0); if constexpr (std::numeric_limits<WidestType>::has_quiet_NaN) { nodata = std::numeric_limits<WidestType>::quiet_NaN(); } #pragma omp parallel for for (std::size_t 
i = 0; i < size(); ++i) { if (is_nodata(i) || other.is_nodata(i)) { result[i] = nodata; } else { result[i] = operation(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i])); } } return result; } RasterMetadata _meta; data_type _data; }; template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>> DenseRaster<T> operator+(TScalar lhs, const DenseRaster<T>& rhs) { return rhs + lhs; } template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>> auto operator-(TScalar value, const DenseRaster<T>& rhs) { using ResultType = decltype(TScalar() - T()); DenseRaster<ResultType> result(rhs.metadata()); std::transform(begin(rhs), end(rhs), begin(result), nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value))); return result; } template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>> DenseRaster<T> operator*(TScalar lhs, const DenseRaster<T>& rhs) { return rhs * lhs; } template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>> auto operator/(TScalar scalar, const DenseRaster<T>& rhs) { //throw_on_size_mismatch(other); //// For nan nodata, standard eigen operator can be used //if constexpr (typeHasNaN() && std::is_same_v<T, TOther>) { // // all types are the same, no casts needed // return DenseRaster<T>(_meta, _data / other._data); //} //return performRasterOperation<nodata::divides>(other); using ResultType = decltype(1.0f * T()); static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type"); DenseRaster<ResultType> result(rhs.metadata()); for (std::size_t i = 0; i < rhs.size(); ++i) { auto value = rhs[i]; if (value == 0) { if (!result.nodata().has_value()) { throw InvalidArgument("Division by raster that contains 0 values"); } result.mark_as_nodata(i); } else { result[i] = scalar / static_cast<ResultType>(value); } } return result; } template <typename 
T> auto cbegin(const DenseRaster<T>& ras) { return ras.data(); } template <typename T> auto cend(const DenseRaster<T>& ras) { return ras.cend(); } template <typename T> auto begin(DenseRaster<T>& ras) { return ras.begin(); } template <typename T> auto begin(const DenseRaster<T>& ras) { return ras.begin(); } template <typename T> auto end(DenseRaster<T>& ras) { return ras.end(); } template <typename T> auto end(const DenseRaster<T>& ras) { return ras.cend(); } template <typename T> const T* data(const DenseRaster<T>& ras) { return ras.data(); } template <typename T> T* data(DenseRaster<T>& ras) { return ras.data(); } template <typename T> auto size(const DenseRaster<T>& ras) { return ras.size(); } }
trsm_x_csc_n_lo_col.c
#include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(n*sizeof(ALPHA_Number)); memset(diag, '\0', m * sizeof(ALPHA_Number)); ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < n; c++) { for (ALPHA_INT ai = A->cols_start[c]; ai < A->cols_end[c]; ai++) { ALPHA_INT ar = A->row_indx[ai]; if (ar == c) { diag[c] = A->values[ai]; } } } #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for(ALPHA_INT out_y_col = 0; out_y_col < columns;out_y_col++){ for(int i = 0 ; i < n ; i++){ //initialize y[] as x[]*aplha alpha_mul(y[index2(out_y_col,i,ldy)], alpha, x[index2(out_y_col,i,ldx)]); } //following processing simulates Gaussian Elimination for(ALPHA_INT c = 0; c < n; ++c){//csc format, traverse by column alpha_div(y[index2(out_y_col,c,ldy)], y[index2(out_y_col,c,ldy)], diag[c]); for(ALPHA_INT ai = A->cols_start[c]; ai < A->cols_end[c];ai++){ ALPHA_INT ar = A->row_indx[ai]; if(c < ar){ alpha_msube(y[index2(out_y_col,ar,ldy)], A->values[ai], y[index2(out_y_col,c,ldy)]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
load.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include "sptensor.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* FIX: strrchr/strcmp/memset were used without this include */
#include <numa.h>

struct ftype
{
    char * extension;
    int type;
};

/* Known file extensions: 0 = text coordinate format, 1 = binary. */
static struct ftype file_extensions[] = {
    { ".tns", 0 },
    { ".coo", 0 },
    { ".bin", 1 },
    { NULL, 0}
};

/* Map a filename to a loader type via its extension; defaults to text (0). */
static int get_file_type(
    char const * const fname)
{
    /* find last . in filename */
    char const * const suffix = strrchr(fname, '.');
    if(suffix == NULL) {
        goto NOT_FOUND;
    }

    size_t idx = 0;
    do {
        if(strcmp(suffix, file_extensions[idx].extension) == 0) {
            return file_extensions[idx].type;
        }
    } while(file_extensions[++idx].extension != NULL);

    /* default to text coordinate format */
NOT_FOUND:
    fprintf(stderr, "extension for '%s' not recognized. "
                    "Defaulting to ASCII coordinate form.\n", fname);
    return 0;
}

/* fread wrapper: aborts on short read so truncated binary files cannot be
 * silently loaded as garbage. */
static void checked_fread(void * ptr, size_t size, size_t nmemb, FILE * fin)
{
    if(fread(ptr, size, nmemb, fin) != nmemb) {
        fprintf(stderr, "ERROR: unexpected end of file or read failure\n");
        exit(-1);
    }
}

/*
 * Parse a text coordinate-format tensor: "nmodes dims... (index... value)*".
 * Indices are shifted down by start_index. Reading stops at the first line
 * that fails to parse (also the normal EOF path).
 */
static int p_tt_read_file(sptSparseTensor *tsr, sptIndex start_index, FILE *fp)
{
    int iores, retval;
    sptIndex mode;

    iores = fscanf(fp, "%u", &tsr->nmodes);
    /* FIX: was `iores < 0`, which missed fscanf returning 0 (parse failure) */
    spt_CheckOSError(iores != 1, "SpTns Load");

    /* Only allocate space for sortorder, initialized to 0s. */
    tsr->sortorder = malloc(tsr->nmodes * sizeof tsr->sortorder[0]);
    spt_CheckOSError(!tsr->sortorder, "SpTns Load");
    memset(tsr->sortorder, 0, tsr->nmodes * sizeof tsr->sortorder[0]);

    tsr->ndims = malloc(tsr->nmodes * sizeof *tsr->ndims);
    spt_CheckOSError(!tsr->ndims, "SpTns Load");
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        iores = fscanf(fp, "%u", &tsr->ndims[mode]);
        spt_CheckOSError(iores != 1, "SpTns Load");
    }

    tsr->nnz = 0;
    tsr->inds = malloc(tsr->nmodes * sizeof *tsr->inds);
    spt_CheckOSError(!tsr->inds, "SpTns Load");
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        retval = sptNewIndexVector(&tsr->inds[mode], 0, 0);
        spt_CheckError(retval, "SpTns Load", NULL);
    }
    retval = sptNewValueVector(&tsr->values, 0, 0);
    spt_CheckError(retval, "SpTns Load", NULL);

    while(retval == 0) {
        double value;
        for(mode = 0; mode < tsr->nmodes; ++mode) {
            sptIndex index;
            iores = fscanf(fp, "%u", &index);
            if(iores != 1) {
                retval = -1; /* EOF or malformed entry: stop reading */
                break;
            }
            if(index < start_index) {
                spt_CheckError(SPTERR_VALUE_ERROR, "SpTns Load", "index < start_index");
            }
            sptAppendIndexVector(&tsr->inds[mode], index-start_index);
        }
        if(retval == 0) {
            iores = fscanf(fp, "%lf", &value);
            if(iores != 1) {
                retval = -1;
                break;
            }
            sptAppendValueVector(&tsr->values, value);
            ++tsr->nnz;
        }
    }
    /* Truncate index vectors to the count of complete entries actually read. */
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        tsr->inds[mode].len = tsr->nnz;
    }
    /* sptSparseTensorCollectZeros(tsr); */

    return 0;
}

/* Read and validate the binary header (magic, index width, value width). */
static void read_binary_header(
    FILE * fin,
    bin_header * header)
{
    checked_fread(&(header->magic), sizeof(header->magic), 1, fin);
    checked_fread(&(header->idx_width), sizeof(header->idx_width), 1, fin);
    checked_fread(&(header->val_width), sizeof(header->val_width), 1, fin);

    if(header->idx_width > PARTI_INDEX_TYPEWIDTH / 8) {
        fprintf(stderr, "ERROR input has %lu-bit integers. "
                        "Build with PARTI_INDEX_TYPEWIDTH %lu\n",
                header->idx_width * 8, header->idx_width * 8);
        exit(-1);
    }

    if(header->val_width > PARTI_VALUE_TYPEWIDTH / 8) {
        fprintf(stderr, "WARNING input has %lu-bit floating-point values. "
                        "Build with PARTI_VALUE_TYPEWIDTH %lu for full precision\n",
                header->val_width * 8, header->val_width * 8);
    }
}

/* Read `count` indices, widening from 32-bit on the fly when needed. */
static void fill_binary_idx(
    sptIndex * const buffer,
    sptIndex const count,
    bin_header const * const header,
    FILE * fin)
{
    if(header->idx_width == sizeof(sptIndex)) {
        checked_fread(buffer, sizeof(sptIndex), count, fin);
    } else {
        /* read in uint32_t in a buffered fashion */
        sptIndex const BUF_LEN = 1024*1024;
        uint32_t * ubuf = (uint32_t*)malloc(BUF_LEN * sizeof(*ubuf));

        for(sptIndex n=0; n < count; n += BUF_LEN) {
            sptIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
            checked_fread(ubuf, sizeof(*ubuf), read_count, fin);
            #pragma omp parallel for schedule(static)
            for(sptIndex i=0; i < read_count; ++i) {
                buffer[n + i] = ubuf[i];
            }
        }

        free(ubuf);
    }
}

/* Same as fill_binary_idx, but targets the wider nonzero-count index type. */
static void fill_binary_nnzidx(
    sptNnzIndex * const buffer,
    sptIndex const count,
    bin_header const * const header,
    FILE * fin)
{
    if(header->idx_width == sizeof(sptNnzIndex)) {
        checked_fread(buffer, sizeof(sptNnzIndex), count, fin);
    } else {
        /* read in uint32_t in a buffered fashion */
        sptIndex const BUF_LEN = 1024*1024;
        uint32_t * ubuf = (uint32_t*)malloc(BUF_LEN * sizeof(*ubuf));

        for(sptIndex n=0; n < count; n += BUF_LEN) {
            sptIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
            checked_fread(ubuf, sizeof(*ubuf), read_count, fin);
            #pragma omp parallel for schedule(static)
            for(sptIndex i=0; i < read_count; ++i) {
                buffer[n + i] = ubuf[i];
            }
        }

        free(ubuf);
    }
}

/* Read `count` values, converting between float/double when widths differ. */
static void fill_binary_val(
    sptValue * const buffer,
    sptIndex const count,
    bin_header const * const header,
    FILE * fin)
{
    if(header->val_width == sizeof(sptValue)) {
        checked_fread(buffer, sizeof(sptValue), count, fin);
    } else {
        /* read in float in a buffered fashion */
        sptIndex const BUF_LEN = 1024*1024;
        /* select whichever *is not* configured with. */
#if PARTI_VALUE_TYPEWIDTH == 64
        float * ubuf = (float*)malloc(BUF_LEN * sizeof(*ubuf));
#else
        double * ubuf = (double*)malloc(BUF_LEN * sizeof(*ubuf));
#endif

        for(sptIndex n=0; n < count; n += BUF_LEN) {
            sptIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
            checked_fread(ubuf, sizeof(*ubuf), read_count, fin);
            #pragma omp parallel for schedule(static)
            for(sptIndex i=0; i < read_count; ++i) {
                buffer[n + i] = ubuf[i];
            }
        }

        free(ubuf);
    }
}

/**
 * @brief Read a COORD tensor from a binary file, converting from smaller idx or
 *        val precision if necessary.
 *
 * @param fin The file to read from.
 *
 * @return The parsed tensor.
 */
static int p_tt_read_binary_file(sptSparseTensor *tsr, FILE * fin)
{
    int result;
    bin_header header;
    read_binary_header(fin, &header);

    sptNnzIndex nnz = 0;
    sptIndex nmodes = 0;
    fill_binary_idx(&nmodes, 1, &header, fin);
    /* presumably sptNewSparseTensor takes ownership of (or copies) dims;
       confirm before adding a free() here */
    sptIndex * dims = (sptIndex *) malloc (nmodes * sizeof(*dims));
    fill_binary_idx(dims, nmodes, &header, fin);
    fill_binary_nnzidx(&nnz, 1, &header, fin);

    /* allocate structures */
    sptNewSparseTensor(tsr, nmodes, dims);
    tsr->nnz = nnz;
    for(sptIndex m=0; m < nmodes; ++m) {
        result = sptResizeIndexVector(&tsr->inds[m], nnz);
        spt_CheckError(result, "SpTns Read", NULL);
    }
    result = sptResizeValueVector(&tsr->values, nnz);
    spt_CheckError(result, "SpTns Read", NULL);

    /* fill in tensor data */
    for(sptIndex m=0; m < nmodes; ++m) {
        fill_binary_idx(tsr->inds[m].data, nnz, &header, fin);
    }
    fill_binary_val(tsr->values.data, nnz, &header, fin);

    return 0;
}

/* NUMA-aware variant of p_tt_read_binary_file: allocations are pinned to
 * numa_node. */
static int p_tt_read_binary_file_numa(sptSparseTensor *tsr, FILE * fin, int numa_node)
{
    int result;
    bin_header header;
    read_binary_header(fin, &header);

    sptNnzIndex nnz = 0;
    sptIndex nmodes = 0;
    fill_binary_idx(&nmodes, 1, &header, fin);
    sptIndex * dims = (sptIndex *) numa_alloc_onnode (nmodes * sizeof(*dims), numa_node);
    fill_binary_idx(dims, nmodes, &header, fin);
    fill_binary_nnzidx(&nnz, 1, &header, fin);

    /* allocate structures */
    sptNewSparseTensorNuma(tsr, nmodes, dims, numa_node);
    tsr->nnz = nnz;
    for(sptIndex m=0; m < nmodes; ++m) {
        result = sptResizeIndexVectorNuma(&tsr->inds[m], nnz);
        spt_CheckError(result, "SpTns Read", NULL);
    }
    result = sptResizeValueVectorNuma(&tsr->values, nnz);
    spt_CheckError(result, "SpTns Read", NULL);

    /* fill in tensor data */
    for(sptIndex m=0; m < nmodes; ++m) {
        fill_binary_idx(tsr->inds[m].data, nnz, &header, fin);
    }
    fill_binary_val(tsr->values.data, nnz, &header, fin);

    return 0;
}

/**
 * Load the contents of a sparse tensor from a text or binary file.
 * @param tsr         the sparse tensor to store into
 * @param start_index the index of the first element in array. Set to 1 for
 *                    MATLAB compatibility, else set to 0
 * @param fname       the file to read from
 */
int sptLoadSparseTensor(sptSparseTensor *tsr, sptIndex start_index, char const * const fname)
{
    FILE * fp = fopen(fname, "r");
    sptAssert(fp != NULL);
    int iores;

    switch(get_file_type(fname)) {
    case 0:
        iores = p_tt_read_file(tsr, start_index, fp);
        spt_CheckOSError(iores != 0, "SpTns Load");
        break;
    case 1:
        iores = p_tt_read_binary_file(tsr, fp);
        spt_CheckOSError(iores != 0, "SpTns Load");
        break;
    default:
        /* unreachable with the current get_file_type, kept for safety */
        fclose(fp);
        spt_CheckError(SPTERR_VALUE_ERROR, "SpTns Load", "unknown file type");
    }

    fclose(fp);
    return 0;
}

/* NUMA-aware variant of sptLoadSparseTensor. */
int sptLoadSparseTensorNuma(sptSparseTensor *tsr, sptIndex start_index, char const * const fname, int numa_node)
{
    FILE * fp = fopen(fname, "r");
    sptAssert(fp != NULL);
    int iores;

    switch(get_file_type(fname)) {
    case 0:
        iores = p_tt_read_file(tsr, start_index, fp);
        spt_CheckOSError(iores != 0, "SpTns Load");
        break;
    case 1:
        iores = p_tt_read_binary_file_numa(tsr, fp, numa_node);
        spt_CheckOSError(iores != 0, "SpTns Load");
        break;
    default:
        /* unreachable with the current get_file_type, kept for safety */
        fclose(fp);
        spt_CheckError(SPTERR_VALUE_ERROR, "SpTns Load", "unknown file type");
    }

    fclose(fp);
    return 0;
}

/*
 * Read per-mode index permutations (1-based in the file, converted to 0-based)
 * into map_inds; asserts that exactly sum(ndims) entries were read.
 */
void sptLoadShuffleFile(sptSparseTensor *tsr, FILE *fs, sptIndex ** map_inds)
{
    sptNnzIndex line_count = 0;
    sptNnzIndex dim_count = 0;

    for(sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        dim_count += tsr->ndims[mode];
        for(sptIndex i = 0; i < tsr->ndims[mode]; ++i) {
            /* FIX: fscanf result was ignored; a short file now fails loudly */
            sptAssert(fscanf(fs, "%u", &(map_inds[mode][i])) == 1);
            -- map_inds[mode][i]; /* file is 1-based */
            ++ line_count;
        }
    }
    sptAssert(dim_count == line_count);

    return;
}
GB_binop__isge_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__isge_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__isge_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__isge_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_fp64) // A*D function (colscale): GB (_AxD__isge_fp64) // D*A function (rowscale): GB (_DxB__isge_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__isge_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__isge_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_fp64) // C=scalar+B GB (_bind1st__isge_fp64) // C=scalar+B' GB (_bind1st_tran__isge_fp64) // C=A+scalar GB (_bind2nd__isge_fp64) // C=A'+scalar GB (_bind2nd_tran__isge_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = 
(double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const 
GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tr_db.c
/* * Copyright (©) 2015-2016 Lucas Maugère, Thomas Mijieux * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <mysql/mysql.h> #include "osux.h" #include "taiko_ranking_map.h" #include "taiko_ranking_score.h" #include "taiko_ranking_object.h" #include "tr_db.h" #include "config.h" #include "tr_mods.h" #include "print.h" #ifdef USE_TR_MYSQL_DB #define TR_DB_NAME "taiko_rank" #define TR_DB_USER "tr_user" #define TR_DB_MAPSET "tr_mapset" #define TR_DB_DIFF "tr_diff" #define TR_DB_MOD "tr_mod" #define TR_DB_SCORE "tr_score" static MYSQL *sql; static int new_rq(MYSQL *sql, const char *rq, ...); static int tr_db_get_id(MYSQL *sql, char *table, char *cond); static char *tr_db_escape_str(MYSQL *sql, const char *src); static int tr_db_insert_user(const struct tr_map *map); static int tr_db_insert_mapset(const struct tr_map *map, int user_id); static int tr_db_insert_diff(const struct tr_map *map, int mapset_id); static int tr_db_insert_mod(const struct tr_map *map); static int tr_db_insert_update_score(const struct tr_map *map, int diff_id, int mod_id); //------------------------------------------------- static void tr_db_exit(void) { if (sql != NULL) mysql_close(sql); } void tr_db_init(void) { sql = mysql_init(NULL); if (sql == NULL) { tr_error("Error: mysql init"); return; } if (NULL == mysql_real_connect( sql, GLOBAL_CONFIG->db_ip, GLOBAL_CONFIG->db_login, GLOBAL_CONFIG->db_passwd, 
NULL, 0, NULL, 0)) { tr_error("%s", mysql_error(sql)); mysql_close(sql); sql = NULL; return; } new_rq(sql, "USE %s;", TR_DB_NAME); atexit(tr_db_exit); } //------------------------------------------------- static int new_rq(MYSQL *sql, const char *rq, ...) { va_list va; va_start(va, rq); char *buf = NULL; vasprintf(&buf, rq, va); va_end(va); if (mysql_query(sql, buf)) { tr_error("'%s' request: '%s'", mysql_error(sql), buf); free(buf); return -1; } free(buf); return 0; } //------------------------------------------------- static int tr_db_get_id(MYSQL *sql, char *table, char *cond) { MYSQL_RES *result; MYSQL_ROW row; #pragma omp critical { new_rq(sql, "SELECT * FROM %s WHERE %s;", table, cond); result = mysql_store_result(sql); row = mysql_fetch_row(result); } int id = -1; if (row != NULL) id = atoi(row[0]); return id; } //------------------------------------------------- static char *tr_db_escape_str(MYSQL *sql, const char *src) { unsigned int l = strlen(src); char *dst = malloc(sizeof(char) * (2 * l + 1)); mysql_real_escape_string(sql, dst, src, l); return dst; } //------------------------------------------------- static int tr_db_insert_user(const struct tr_map *map) { char *map_creator = tr_db_escape_str(sql, map->creator); char *cond = NULL; asprintf(&cond, "name = '%s'", map_creator); int user_id = tr_db_get_id(sql, TR_DB_USER, cond); if (user_id < 0) { #pragma omp critical new_rq(sql,"INSERT INTO %s(name, density_star, reading_star," "pattern_star, accuracy_star, final_star)" "VALUES('%s', 0, 0, 0, 0, 0);", TR_DB_USER, map_creator); user_id = tr_db_get_id(sql, TR_DB_USER, cond); fprintf(OUTPUT_INFO, "New user: %s, ID: %d\n", map_creator, user_id); } free(cond); free(map_creator); return user_id; } //------------------------------------------------- static int tr_db_insert_mapset(const struct tr_map *map, int user_id) { char *map_title = tr_db_escape_str(sql, map->title); char *map_artist = tr_db_escape_str(sql, map->artist); char *map_source = 
tr_db_escape_str(sql, map->source); char *map_artist_uni = tr_db_escape_str(sql, map->artist_uni); char *map_title_uni = tr_db_escape_str(sql, map->title_uni); char *cond = NULL; asprintf(&cond, "creator_ID = %d and artist = '%s' and " "title = '%s'", user_id, map_artist, map_title); int mapset_id = tr_db_get_id(sql, TR_DB_MAPSET, cond); if (mapset_id < 0) { #pragma omp critical new_rq(sql, "INSERT INTO %s(artist, title, source, " "creator_ID, artist_uni, title_uni, osu_mapset_ID)" "VALUES('%s', '%s', '%s', %d, '%s', '%s', %d);", TR_DB_MAPSET, map_artist, map_title, map_source, user_id, map_artist_uni, map_title_uni, map->mapset_osu_ID); mapset_id = tr_db_get_id(sql, TR_DB_MAPSET, cond); fprintf(OUTPUT_INFO, "New mapset: %s - %s ID: %d\n", map_artist, map_title, mapset_id); } free(cond); free(map_title); free(map_artist); free(map_source); free(map_artist_uni); free(map_title_uni); return mapset_id; } //------------------------------------------------- static int tr_db_insert_diff(const struct tr_map *map, int mapset_id) { char *map_diff = tr_db_escape_str(sql, map->diff); char *cond = NULL; asprintf(&cond, "mapset_ID = %d and diff_name = '%s'", mapset_id, map_diff); int diff_id = tr_db_get_id(sql, TR_DB_DIFF, cond); if (diff_id < 0) { #pragma omp critical new_rq(sql, "INSERT INTO %s(diff_name, mapset_ID, osu_diff_ID," "max_combo, bonus, hash)" "VALUES('%s', %d, %d, %d, %d, '%s');", TR_DB_DIFF, map_diff, mapset_id, map->diff_osu_ID, map->max_combo, map->bonus, map->hash); diff_id = tr_db_get_id(sql, TR_DB_DIFF, cond); fprintf(OUTPUT_INFO, "New diff: %s ID: %d\n", map_diff, diff_id); } free(cond); free(map_diff); return diff_id; } //------------------------------------------------- static int tr_db_insert_mod(const struct tr_map *map) { char *mod_str = trm_mods_to_str(map); char *cond = NULL; asprintf(&cond, "mod_name = '%s'", mod_str); int mod_id = tr_db_get_id(sql, TR_DB_MOD, cond); if (mod_id < 0) { #pragma omp critical new_rq(sql, "INSERT INTO %s(mod_name) 
VALUES('%s');", TR_DB_MOD, mod_str); mod_id = tr_db_get_id(sql, TR_DB_MOD, cond); fprintf(OUTPUT_INFO, "New mod: %s ID: %d\n", mod_str, mod_id); } free(cond); free(mod_str); return mod_id; } //------------------------------------------------- static int tr_db_insert_update_score(const struct tr_map *map, int diff_id, int mod_id) { char *cond = NULL; asprintf(&cond, "diff_ID = %d and mod_ID = %d and great = %d" " and good = %d and miss = %d", diff_id, mod_id, map->great, map->good, map->miss); int score_id = tr_db_get_id(sql, TR_DB_SCORE, cond); if (score_id < 0) { #pragma omp critical new_rq(sql, "INSERT INTO %s(diff_ID, mod_ID, accuracy, " "combo, great, good, miss, " "density_star, pattern_star, reading_star, " "accuracy_star, final_star)" "VALUES(%d, %d, %.4g, %d, %d, %d, %d, " "%.4g, %.4g, %.4g, %.4g, %.4g);", TR_DB_SCORE, diff_id, mod_id, map->acc, map->combo, map->great, map->good, map->miss, map->density_star, map->pattern_star, map->reading_star, map->accuracy_star, map->final_star); score_id = tr_db_get_id(sql, TR_DB_SCORE, cond); fprintf(OUTPUT_INFO, "New score: (%g%%) ID: %d\n", map->acc, score_id); } else { #pragma omp critical new_rq(sql, "UPDATE %s SET combo = %d, density_star = %.4g, " "reading_star = %.4g, pattern_star = %.4g," "accuracy_star = %.4g, final_star = %.4g " "WHERE ID = %d;", TR_DB_SCORE, map->combo, map->density_star, map->reading_star, map->pattern_star, map->accuracy_star, map->final_star, score_id); fprintf(OUTPUT_INFO, "Updated score: (%g%%) ID: %d\n", map->acc, score_id); } free(cond); return score_id; } //------------------------------------------------- void trm_db_insert(const struct tr_map *map) { if (sql == NULL) { tr_error("Couldn't connect to DB. 
Data won't be stored."); return; } int user_id = tr_db_insert_user(map); int mod_id = tr_db_insert_mod(map); int mapset_id = tr_db_insert_mapset(map, user_id); int diff_id = tr_db_insert_diff(map, mapset_id); tr_db_insert_update_score(map, diff_id, mod_id); } //------------------------------------------------- //------------------------------------------------- //------------------------------------------------- #else // USE_TR_MYSQL_DB #include "taiko_ranking_map.h" void tr_db_init(void) { } void trm_db_insert(const struct tr_map *map UNUSED) { tr_error("Database was not compiled!"); } #endif // USE_TR_MYSQL_DB
pzlange.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include "core_blas.h"

#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/***************************************************************************//**
 * Parallel tile calculation of max, one, infinity or Frobenius matrix norm
 * for a general matrix.
 *
 * Two-stage scheme: stage 1 reduces each tile into a slot of `work`,
 * stage 2 reduces `work` into the scalar `*value`. Stages are separated
 * by an `omp taskwait` since stage 2 reads every stage-1 output.
 *
 * @param norm      PlasmaMaxNorm, PlasmaOneNorm, PlasmaInfNorm or
 *                  PlasmaFrobeniusNorm (any other value is a no-op)
 * @param A         descriptor of the tiled input matrix
 * @param work      caller-provided workspace (layout depends on `norm`)
 * @param value     output: the computed norm
 * @param sequence  sequence for error propagation; returns early on failure
 * @param request   request handle for asynchronous error reporting
 ******************************************************************************/
void plasma_pzlange(plasma_enum_t norm, plasma_desc_t A,
                    double *work, double *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    // Scratch locations shared by the cases below. Previously these were
    // declared inside the switch body before the first case label — legal
    // C, but confusing and a source of compiler warnings; hoisted here.
    double stub;        // dummy output slot whose value is never used
    double *workspace;  // stage-2 reduction workspace
    double *scale;      // per-tile scale factors (Frobenius)
    double *sumsq;      // per-tile scaled sums of squares (Frobenius)

    switch (norm) {
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // Stage 1: max absolute value of tile (m,n) into work[A.mt*n+m].
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                core_omp_zlange(PlasmaMaxNorm,
                                mvam, nvan,
                                A(m, n), ldam,
                                &stub, &work[A.mt*n+m],
                                sequence, request);
            }
        }
        #pragma omp taskwait
        // Stage 2: max over the A.mt-by-A.nt table of tile maxima.
        core_omp_dlange(PlasmaMaxNorm,
                        A.mt, A.nt,
                        work, A.mt,
                        &stub, value,
                        sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
        // Stage 1: per-tile column sums laid out row-block by row-block.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                core_omp_zlange_aux(PlasmaOneNorm,
                                    mvam, nvan,
                                    A(m, n), ldam,
                                    &work[A.n*m+n*A.nb],
                                    sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = work + A.mt*A.n;
        // Stage 2: accumulate partial column sums across row blocks and
        // take the maximum (expressed as an inf-norm over the A.n-by-A.mt
        // workspace layout).
        core_omp_dlange(PlasmaInfNorm,
                        A.n, A.mt,
                        work, A.n,
                        workspace, value,
                        sequence, request);
        break;
    //================
    // PlasmaInfNorm
    //================
    case PlasmaInfNorm:
        // Stage 1: per-tile row sums laid out column-block by column-block.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                core_omp_zlange_aux(PlasmaInfNorm,
                                    mvam, nvan,
                                    A(m, n), ldam,
                                    &work[A.m*n+m*A.mb],
                                    sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = work + A.nt*A.m;
        // Stage 2: accumulate partial row sums across column blocks and
        // take the maximum.
        core_omp_dlange(PlasmaInfNorm,
                        A.m, A.nt,
                        work, A.m,
                        workspace, value,
                        sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        scale = work;
        sumsq = work + A.mt*A.nt;
        // Stage 1: scaled sum-of-squares (scale, sumsq) pair per tile,
        // avoiding overflow/underflow in the squares.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                core_omp_zgessq(mvam, nvan,
                                A(m, n), ldam,
                                &scale[A.mt*n+m], &sumsq[A.mt*n+m],
                                sequence, request);
            }
        }
        #pragma omp taskwait
        // Stage 2: combine the per-tile (scale, sumsq) pairs into the norm.
        core_omp_dgessq_aux(A.mt*A.nt,
                            scale, sumsq,
                            value,
                            sequence, request);
        break;
    default:
        // Unsupported norm: leave *value untouched (matches the previous
        // fall-through behavior of the unhandled case).
        break;
    }
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) { for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(32*t3+Nx+28,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),64*t4+62),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }